Columns:
code: string, lengths 13 to 6.09M
order_type: string, 2 distinct values
original_example: dict
step_ids: list, lengths 1 to 5
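Each row below pairs a code string with its order_type, an original_example dict (blob_id, index, the step-1 ... step-N masked versions, and a step-ids list), and the step_ids column. A minimal sketch of iterating such records, assuming they are serialized as one JSON object per line; the file name rows.jsonl is a placeholder:

import json

# Sketch only: assumes one JSON object per line with the columns listed above.
with open("rows.jsonl", encoding="utf-8") as fin:
    for line in fin:
        row = json.loads(line)
        example = row["original_example"]
        # step-1 ... step-N hold progressively less-masked versions of the code;
        # collect them in numeric order and skip the "step-ids" entry.
        step_keys = sorted(
            (k for k in example if k.startswith("step-") and k != "step-ids"),
            key=lambda k: int(k.split("-")[1]),
        )
        steps = [example[k] for k in step_keys]
        print(example["blob_id"], row["order_type"], len(steps), row["step_ids"])

In the records that follow, rows marked normal store the complete source as their code value (it matches the final step), while rows marked flexible store the step versions concatenated with <|reserved_special_token_N|> separators.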
from django.db import models
from django.contrib.contenttypes.models import ContentType

from widgy.generic import ProxyGenericForeignKey, ProxyGenericRelation
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation


class Base(models.Model):
    content_type = models.ForeignKey(ContentType)
    content_id = models.PositiveIntegerField()
    obj = ProxyGenericForeignKey('content_type', 'content_id')


class Related(models.Model):
    bases = ProxyGenericRelation(Base,
                                 content_type_field='content_type',
                                 object_id_field='content_id')

    content = models.CharField(max_length=255)


class AbstractModel(models.Model):
    bases = ProxyGenericRelation(Base,
                                 content_type_field='content_type',
                                 object_id_field='content_id')
    class Meta:
        abstract = True

class ConcreteModel(AbstractModel):
    pass

class Proxy(Related):
    def some_method(self):
        return True

    class Meta:
        proxy = True
normal
{ "blob_id": "c70df1fab0db6f71d22a23836b11d66879879656", "index": 6336, "step-1": "<mask token>\n\n\nclass Related(models.Model):\n <mask token>\n <mask token>\n\n\nclass AbstractModel(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n\n\n class Meta:\n abstract = True\n\n\nclass ConcreteModel(AbstractModel):\n pass\n\n\nclass Proxy(Related):\n\n def some_method(self):\n return True\n\n\n class Meta:\n proxy = True\n", "step-2": "<mask token>\n\n\nclass Related(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n content = models.CharField(max_length=255)\n\n\nclass AbstractModel(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n\n\n class Meta:\n abstract = True\n\n\nclass ConcreteModel(AbstractModel):\n pass\n\n\nclass Proxy(Related):\n\n def some_method(self):\n return True\n\n\n class Meta:\n proxy = True\n", "step-3": "<mask token>\n\n\nclass Base(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Related(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n content = models.CharField(max_length=255)\n\n\nclass AbstractModel(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n\n\n class Meta:\n abstract = True\n\n\nclass ConcreteModel(AbstractModel):\n pass\n\n\nclass Proxy(Related):\n\n def some_method(self):\n return True\n\n\n class Meta:\n proxy = True\n", "step-4": "<mask token>\n\n\nclass Base(models.Model):\n content_type = models.ForeignKey(ContentType)\n content_id = models.PositiveIntegerField()\n obj = ProxyGenericForeignKey('content_type', 'content_id')\n\n\nclass Related(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n content = models.CharField(max_length=255)\n\n\nclass AbstractModel(models.Model):\n bases = ProxyGenericRelation(Base, content_type_field='content_type',\n object_id_field='content_id')\n\n\n class Meta:\n abstract = True\n\n\nclass ConcreteModel(AbstractModel):\n pass\n\n\nclass Proxy(Related):\n\n def some_method(self):\n return True\n\n\n class Meta:\n proxy = True\n", "step-5": "from django.db import models\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom widgy.generic import ProxyGenericForeignKey, ProxyGenericRelation\nfrom django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation\n\n\nclass Base(models.Model):\n content_type = models.ForeignKey(ContentType)\n content_id = models.PositiveIntegerField()\n obj = ProxyGenericForeignKey('content_type', 'content_id')\n\n\nclass Related(models.Model):\n bases = ProxyGenericRelation(Base,\n content_type_field='content_type',\n object_id_field='content_id')\n\n content = models.CharField(max_length=255)\n\n\nclass AbstractModel(models.Model):\n bases = ProxyGenericRelation(Base,\n content_type_field='content_type',\n object_id_field='content_id')\n class Meta:\n abstract = True\n\nclass ConcreteModel(AbstractModel):\n pass\n\nclass Proxy(Related):\n def some_method(self):\n return True\n\n class Meta:\n proxy = True\n", "step-ids": [ 6, 7, 8, 9, 11 ] }
[ 6, 7, 8, 9, 11 ]
import re
import requests
import numpy as np
import json
import os
from collections import OrderedDict
import pandas as pd
import json
import datetime
import time
# Write the list to a JSON file so pandas can read it easily
def write_list_to_json(list, json_file_name, json_file_save_path):
    os.chdir(json_file_save_path)
    with open(json_file_name, 'w') as f:
        json.dump(list, f)

# Fetch and parse the data
def getworld_data(url,header):
    headers = header
    res = requests.get(url,headers = headers)
    res.encoding = "UTF-8"
    pattern = re.compile('(\'\{"(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\}\')',re.S)
    end = re.findall(pattern,res.text)
    a=str(end[0])
    with open('test.txt','w') as f:
        f.write(a)
    data_relative_confirmed_json=[]
    pattern_1 = re.compile('(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}',re.S)
    end_1=re.findall(pattern_1,a)
    return end_1

# Work out the date sequence and write the data
def count_time(end_1):
    data_relative_confirmed_json=[]
    country=[]
    for i in range(len(end_1)):
        data={
            'Country':'',
        }
        data['Country']=end_1[i][0]
        # confirmed case counts
        country.append(end_1[i][0])
        care=end_1[i][5].replace('[','').replace(']','').split(',')
        try:
            time=end_1[i][6].replace('/',',').replace('/',',').replace('"','').split(',')
            print(time)
            time[2]='2020'
            date=[]
            in_date = time[2]+'-'+time[0]+'-'+time[1]
            dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
            for k in range(len(end_1[i][5].replace('[','').replace(']','').split(','))):
                out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
                dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
                date.append(out_date)
            print(date)
            time_care=OrderedDict(zip(date,care))
            print(time_care)
            date_json=OrderedDict(data,**time_care)
            data_relative_confirmed_json.append(date_json)

        except:
            pass
    return data_relative_confirmed_json

def write_json_to_csv(data_relative_confirmed_json,end_1):
    write_list_to_json(data_relative_confirmed_json,'20200517-world-active-data.json','E:/python_code/world_cov19')
    data_csv=pd.DataFrame(json.loads(open('20200517-world-active-data.json','r+').read()))
    print(end_1[36][0])
    care=end_1[36][5].replace('[','').replace(']','').split(',')
    try:
        time=end_1[36][6].replace('/',',').replace('/',',').replace('"','').split(',')
        print(time)
        time[2]='2020'
        date=[]
        in_date = time[2]+'-'+time[0]+'-'+time[1]
        dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
        for k in range(len(end_1[36][5].replace('[','').replace(']','').split(','))):
            out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
            dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
            date.append(out_date)
        print(date)
        time_care=OrderedDict(zip(date,care))
        print(time_care)
    except:
        pass
    date.insert(0,'Country')
    cols=date
    data_csv=data_csv.loc[:,cols]
    data_csv.T
    data_csv.to_csv('20200517-world-active-data.json.csv')
    df=pd.read_csv('20200517-world-active-data.json.csv')
    new_csv=df.T
    new_csv.to_csv('20200517-world-active-data.json.csv')
normal
{ "blob_id": "0677e12bc9733c76bff7ed3fe83e3800e64e9a10", "index": 7633, "step-1": "<mask token>\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\n<mask token>\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n", "step-2": "<mask token>\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = 
re.findall(pattern_1, a)\n return end_1\n\n\ndef count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n", "step-3": "<mask token>\n\n\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\ndef 
count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n", "step-4": "import re\nimport requests\nimport numpy as np\nimport json\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport json\nimport datetime\nimport time\n\n\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n 
'(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\ndef count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n", "step-5": "import re\nimport requests\nimport numpy as np\nimport json\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport json\nimport datetime\nimport time\n#将数组写入json文件方便pandas的读取\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n#获取数据算法\ndef getworld_data(url,header):\n headers = header\n res = requests.get(url,headers = headers)\n res.encoding = \"UTF-8\"\n pattern = re.compile('(\\'\\{\"(\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\}\\')',re.S)\n end = re.findall(pattern,res.text)\n 
a=str(end[0])\n with open('test.txt','w') as f:\n f.write(a)\n data_relative_confirmed_json=[]\n pattern_1 = re.compile('(\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}',re.S)\n end_1=re.findall(pattern_1,a)\n return end_1\n\n#时间推算算法及数据写入\ndef count_time(end_1):\n data_relative_confirmed_json=[]\n country=[]\n for i in range(len(end_1)):\n data={\n 'Country':'',\n }\n data['Country']=end_1[i][0]\n #确诊人数\n country.append(end_1[i][0])\n care=end_1[i][5].replace('[','').replace(']','').split(',')\n try:\n time=end_1[i][6].replace('/',',').replace('/',',').replace('\"','').split(',')\n print(time)\n time[2]='2020'\n date=[]\n in_date = time[2]+'-'+time[0]+'-'+time[1]\n dt = datetime.datetime.strptime(in_date, \"%Y-%m-%d\")\n for k in range(len(end_1[i][5].replace('[','').replace(']','').split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n dt=datetime.datetime.strptime(out_date, \"%Y-%m-%d\")\n date.append(out_date)\n print(date)\n time_care=OrderedDict(zip(date,care))\n print(time_care)\n date_json=OrderedDict(data,**time_care)\n data_relative_confirmed_json.append(date_json)\n \n except:\n pass\n return data_relative_confirmed_json\n\ndef write_json_to_csv(data_relative_confirmed_json,end_1):\n write_list_to_json(data_relative_confirmed_json,'20200517-world-active-data.json','E:/python_code/world_cov19')\n data_csv=pd.DataFrame(json.loads(open('20200517-world-active-data.json','r+').read()))\n print(end_1[36][0])\n care=end_1[36][5].replace('[','').replace(']','').split(',')\n try:\n time=end_1[36][6].replace('/',',').replace('/',',').replace('\"','').split(',')\n print(time)\n time[2]='2020'\n date=[]\n in_date = time[2]+'-'+time[0]+'-'+time[1]\n dt = datetime.datetime.strptime(in_date, \"%Y-%m-%d\")\n for k in range(len(end_1[36][5].replace('[','').replace(']','').split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n dt=datetime.datetime.strptime(out_date, \"%Y-%m-%d\")\n date.append(out_date)\n print(date)\n time_care=OrderedDict(zip(date,care))\n print(time_care)\n except:\n pass\n date.insert(0,'Country')\n cols=date\n data_csv=data_csv.loc[:,cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df=pd.read_csv('20200517-world-active-data.json.csv')\n new_csv=df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> class MyLog(LogBase): <|reserved_special_token_0|> <|reserved_special_token_0|> def get_logger(self): return self._get_logger() @staticmethod def type_need(parm, type_): if not isinstance(parm, type_): raise TypeError(f'expect {type_},but got {type(parm)}') <|reserved_special_token_1|> <|reserved_special_token_0|> class MyLog(LogBase): """ 功能: 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz 参数: :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹 :param logger_name: logger对象的名字 :param info_name: 保存info等级的文件名字 :param error_name: :param warning_name: :param debug_name: :param interval: 压缩日志的频率,默认是7天 :param detail: bool值,记录日志是否为详细记录 :param debug: 是否记录debug,默认不记录 :param info: 是否记录info,默认记录 :param error: :param warning: 实例方法: get_logger()-->logger 使用举例: # 记录四种类型的日志 logger = MyLog(debug=True).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') # # # # # # # # # # # # # # # # # # # # # # # # # # 只记录错误日志 logger = MyLog(info=False,warning=False).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') 注意: MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性. 例如: mylog = MyLog('./logs/logs/') mylog2 = MyLog() logger = mylog.get_logger() logger2 = mylog2.get_logger() logger.info('info') logger2 = MyLog('./logs/logs2/').get_logger() logger2.info('info2') 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下 """ def __init__(self, log_path: str='./logs/', **kwargs): self.type_need(log_path, str) if not log_path.endswith('/'): log_path += '/' if not os.path.exists(log_path): os.makedirs(log_path) super(MyLog, self).__init__(dir_path=log_path, **kwargs) def get_logger(self): return self._get_logger() @staticmethod def type_need(parm, type_): if not isinstance(parm, type_): raise TypeError(f'expect {type_},but got {type(parm)}') <|reserved_special_token_1|> <|reserved_special_token_0|> __all__ = ['MyLog'] class MyLog(LogBase): """ 功能: 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz 参数: :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹 :param logger_name: logger对象的名字 :param info_name: 保存info等级的文件名字 :param error_name: :param warning_name: :param debug_name: :param interval: 压缩日志的频率,默认是7天 :param detail: bool值,记录日志是否为详细记录 :param debug: 是否记录debug,默认不记录 :param info: 是否记录info,默认记录 :param error: :param warning: 实例方法: get_logger()-->logger 使用举例: # 记录四种类型的日志 logger = MyLog(debug=True).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') # # # # # # # # # # # # # # # # # # # # # # # # # # 只记录错误日志 logger = MyLog(info=False,warning=False).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') 注意: MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性. 
例如: mylog = MyLog('./logs/logs/') mylog2 = MyLog() logger = mylog.get_logger() logger2 = mylog2.get_logger() logger.info('info') logger2 = MyLog('./logs/logs2/').get_logger() logger2.info('info2') 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下 """ def __init__(self, log_path: str='./logs/', **kwargs): self.type_need(log_path, str) if not log_path.endswith('/'): log_path += '/' if not os.path.exists(log_path): os.makedirs(log_path) super(MyLog, self).__init__(dir_path=log_path, **kwargs) def get_logger(self): return self._get_logger() @staticmethod def type_need(parm, type_): if not isinstance(parm, type_): raise TypeError(f'expect {type_},but got {type(parm)}') <|reserved_special_token_1|> from .log_config import LogBase import os __all__ = ['MyLog'] class MyLog(LogBase): """ 功能: 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz 参数: :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹 :param logger_name: logger对象的名字 :param info_name: 保存info等级的文件名字 :param error_name: :param warning_name: :param debug_name: :param interval: 压缩日志的频率,默认是7天 :param detail: bool值,记录日志是否为详细记录 :param debug: 是否记录debug,默认不记录 :param info: 是否记录info,默认记录 :param error: :param warning: 实例方法: get_logger()-->logger 使用举例: # 记录四种类型的日志 logger = MyLog(debug=True).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') # # # # # # # # # # # # # # # # # # # # # # # # # # 只记录错误日志 logger = MyLog(info=False,warning=False).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') 注意: MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性. 例如: mylog = MyLog('./logs/logs/') mylog2 = MyLog() logger = mylog.get_logger() logger2 = mylog2.get_logger() logger.info('info') logger2 = MyLog('./logs/logs2/').get_logger() logger2.info('info2') 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下 """ def __init__(self, log_path: str='./logs/', **kwargs): self.type_need(log_path, str) if not log_path.endswith('/'): log_path += '/' if not os.path.exists(log_path): os.makedirs(log_path) super(MyLog, self).__init__(dir_path=log_path, **kwargs) def get_logger(self): return self._get_logger() @staticmethod def type_need(parm, type_): if not isinstance(parm, type_): raise TypeError(f'expect {type_},but got {type(parm)}') <|reserved_special_token_1|> # -*- coding: utf-8 -*- from .log_config import LogBase import os __all__ = ['MyLog'] class MyLog(LogBase): """ 功能: 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz 参数: :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹 :param logger_name: logger对象的名字 :param info_name: 保存info等级的文件名字 :param error_name: :param warning_name: :param debug_name: :param interval: 压缩日志的频率,默认是7天 :param detail: bool值,记录日志是否为详细记录 :param debug: 是否记录debug,默认不记录 :param info: 是否记录info,默认记录 :param error: :param warning: 实例方法: get_logger()-->logger 使用举例: # 记录四种类型的日志 logger = MyLog(debug=True).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') # # # # # # # # # # # # # # # # # # # # # # # # # # 只记录错误日志 logger = MyLog(info=False,warning=False).get_logger() logger.info('info') logger.debug('debug') logger.error('error') logger.warning('warning') 注意: MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性. 
例如: mylog = MyLog('./logs/logs/') mylog2 = MyLog() logger = mylog.get_logger() logger2 = mylog2.get_logger() logger.info('info') logger2 = MyLog('./logs/logs2/').get_logger() logger2.info('info2') 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下 """ def __init__(self, log_path: str = './logs/', **kwargs): self.type_need(log_path, str) if not log_path.endswith('/'): log_path += '/' if not os.path.exists(log_path): os.makedirs(log_path) super(MyLog, self).__init__(dir_path=log_path, **kwargs) def get_logger(self): return self._get_logger() @staticmethod def type_need(parm, type_): if not isinstance(parm, type_): raise TypeError(f'expect {type_},but got {type(parm)}')
flexible
{ "blob_id": "3a9987ac326131878b80cb819e3d06ce2f4cb054", "index": 8461, "step-1": "<mask token>\n\n\nclass MyLog(LogBase):\n <mask token>\n <mask token>\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n", "step-2": "<mask token>\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n", "step-3": "<mask token>\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return 
self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n", "step-4": "from .log_config import LogBase\nimport os\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n", "step-5": "# -*- coding: utf-8 -*-\r\n\r\nfrom .log_config import LogBase\r\nimport os\r\n\r\n__all__ = ['MyLog']\r\n\r\n\r\nclass MyLog(LogBase):\r\n \"\"\"\r\n 功能:\r\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\r\n\r\n 参数:\r\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\r\n :param logger_name: logger对象的名字\r\n :param info_name: 保存info等级的文件名字\r\n :param error_name:\r\n :param warning_name:\r\n :param debug_name:\r\n :param interval: 压缩日志的频率,默认是7天\r\n :param detail: bool值,记录日志是否为详细记录\r\n :param debug: 是否记录debug,默认不记录\r\n :param info: 是否记录info,默认记录\r\n :param error:\r\n :param warning:\r\n 实例方法:\r\n get_logger()-->logger\r\n\r\n 使用举例:\r\n # 记录四种类型的日志\r\n logger = MyLog(debug=True).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n\r\n # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\n # 只记录错误日志\r\n logger = MyLog(info=False,warning=False).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n 注意:\r\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\r\n 例如:\r\n\r\n mylog = MyLog('./logs/logs/')\r\n mylog2 = MyLog()\r\n logger = mylog.get_logger()\r\n logger2 = mylog2.get_logger()\r\n logger.info('info')\r\n\r\n logger2 = MyLog('./logs/logs2/').get_logger()\r\n logger2.info('info2')\r\n\r\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\r\n\r\n\r\n\r\n \"\"\"\r\n\r\n def __init__(self, log_path: str = './logs/', **kwargs):\r\n self.type_need(log_path, str)\r\n if not log_path.endswith('/'):\r\n log_path += '/'\r\n if not os.path.exists(log_path):\r\n os.makedirs(log_path)\r\n super(MyLog, 
self).__init__(dir_path=log_path, **kwargs)\r\n\r\n def get_logger(self):\r\n return self._get_logger()\r\n\r\n @staticmethod\r\n def type_need(parm, type_):\r\n if not isinstance(parm, type_):\r\n raise TypeError(f'expect {type_},but got {type(parm)}')\r\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class DrinkFilter(django_filters.FilterSet): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Meta: model = Drinks fields = ['name', 'brands'] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class DrinkFilter(django_filters.FilterSet): BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand. objects.all()) name = django_filters.CharFilter(lookup_expr='icontains') price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt' ) price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt' ) likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt' ) likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt' ) brands = django_filters.MultipleChoiceFilter(field_name='brand__name', choices=BRAND_CHOICES) class Meta: model = Drinks fields = ['name', 'brands'] <|reserved_special_token_0|> <|reserved_special_token_1|> import django_filters from .models import Drinks, Brand class DrinkFilter(django_filters.FilterSet): BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand. objects.all()) name = django_filters.CharFilter(lookup_expr='icontains') price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt' ) price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt' ) likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt' ) likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt' ) brands = django_filters.MultipleChoiceFilter(field_name='brand__name', choices=BRAND_CHOICES) class Meta: model = Drinks fields = ['name', 'brands'] <|reserved_special_token_0|> <|reserved_special_token_1|> import django_filters from .models import Drinks, Brand class DrinkFilter(django_filters.FilterSet): BRAND_CHOICES = tuple( (brand.name, brand.name) for brand in Brand.objects.all()) name = django_filters.CharFilter(lookup_expr='icontains') price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt') price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt') likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt') likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt') brands = django_filters.MultipleChoiceFilter(field_name='brand__name', choices=BRAND_CHOICES) class Meta: model = Drinks fields = ['name', 'brands'] """ f = F({'date_after': '2016-01-01', 'date_before': '2016-02-01'}) """
flexible
{ "blob_id": "a096e811e50e25e47a9b76b1f813c51f4307bbfe", "index": 331, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass DrinkFilter(django_filters.FilterSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.\n objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'\n )\n price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'\n )\n likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'\n )\n likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'\n )\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<mask token>\n", "step-4": "import django_filters\nfrom .models import Drinks, Brand\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.\n objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'\n )\n price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'\n )\n likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'\n )\n likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'\n )\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<mask token>\n", "step-5": "import django_filters\nfrom .models import Drinks, Brand\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple(\n (brand.name, brand.name) for brand in Brand.objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price',\n lookup_expr='lt')\n price_gt = django_filters.NumberFilter(field_name='price',\n lookup_expr='gt')\n likes_lt = django_filters.NumberFilter(field_name='likes',\n lookup_expr='lt')\n likes_gt = django_filters.NumberFilter(field_name='likes',\n lookup_expr='gt')\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n\"\"\"\nf = F({'date_after': '2016-01-01', 'date_before': '2016-02-01'})\n\"\"\"", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import requests
import codecs
import urllib.request
import time
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import re
import os

#input
Result_File="report.txt"

#deleting result file if exists
if os.path.exists(Result_File):
    os.remove(Result_File)

#reading html file and parsing logic
f=codecs.open("test.html", 'r', 'utf-8')
xhtml = f.read()
data = []
# instantiate the parser and feed data to it
soup = BeautifulSoup(xhtml,"html.parser")
#print(soup)
main_table = soup.find('table', { 'id': 'octable' })
#print(main_table)
with open(Result_File, 'w') as r:
    r.write("OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE")
    for rows in main_table.find_all('tr'):
        for cell in rows.find_all('td'):

#print(data)
            if(len(cell.text) != 0):
                cell_text = cell.text.strip()
                a = re.sub(r"\n", "", cell_text, 0)

                r.write(a)
                r.write("|")
    r.write("\n")
normal
{ "blob_id": "869bbc8da8cdb5de0bcaf5664b5482814daae53a", "index": 6212, "step-1": "<mask token>\n", "step-2": "<mask token>\nif os.path.exists(Result_File):\n os.remove(Result_File)\n<mask token>\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n", "step-3": "<mask token>\nResult_File = 'report.txt'\nif os.path.exists(Result_File):\n os.remove(Result_File)\nf = codecs.open('test.html', 'r', 'utf-8')\nxhtml = f.read()\ndata = []\nsoup = BeautifulSoup(xhtml, 'html.parser')\nmain_table = soup.find('table', {'id': 'octable'})\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n", "step-4": "import requests\nimport codecs\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom html.parser import HTMLParser\nimport re\nimport os\nResult_File = 'report.txt'\nif os.path.exists(Result_File):\n os.remove(Result_File)\nf = codecs.open('test.html', 'r', 'utf-8')\nxhtml = f.read()\ndata = []\nsoup = BeautifulSoup(xhtml, 'html.parser')\nmain_table = soup.find('table', {'id': 'octable'})\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n", "step-5": "import requests\nimport codecs\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom html.parser import HTMLParser\nimport re\nimport os\n\n#input\nResult_File=\"report.txt\"\n\n#deleting result file if exists\nif os.path.exists(Result_File):\n os.remove(Result_File)\n\n#reading html file and parsing logic\nf=codecs.open(\"test.html\", 'r', 'utf-8')\nxhtml = f.read()\ndata = []\n# instantiate the parser and feed data to it\nsoup = BeautifulSoup(xhtml,\"html.parser\")\n#print(soup)\nmain_table = soup.find('table', { 'id': 'octable' })\n#print(main_table)\nwith open(Result_File, 'w') as r:\n r.write(\"OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE\")\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n\n#print(data)\n if(len(cell.text) != 0):\n cell_text = cell.text.strip()\n a = re.sub(r\"\\n\", \"\", cell_text, 0)\n\n r.write(a)\n r.write(\"|\")\n r.write(\"\\n\")\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> g.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#') g.parse('./ontology.ttl', format='ttl') sleep(0.5) <|reserved_special_token_0|> for result in results: uri, geometry_type = result gtypes[str(uri)] = str(geometry_type).split('#')[1] <|reserved_special_token_0|> for result in results: uri, dtype = result dtypes[str(uri)] = str(dtype).split('#')[1] <|reserved_special_token_0|> for result in results: dataset, label, atype = result key = str(dataset), str(label) if atype is None and key not in atypes: atypes[key] = '' elif atype is not None: atypes[key] = str(atype).split('#')[1] <|reserved_special_token_0|> with open('./datasets/annotations_datasets.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_gtypes[row[0]] = row[1] test_dtypes[row[0]] = row[2] with open('./datasets/annotations_attributes.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_atypes[row[0], row[1]] = row[2] <|reserved_special_token_0|> for k, v in gtypes.items(): if k not in test_gtypes: continue total += 1 if test_gtypes[k] == v: tp += 1 fn -= 1 <|reserved_special_token_0|> print('Geometry type scores:') print(f'P: {p} , R: {r} , F: {f}') <|reserved_special_token_0|> for k, v in dtypes.items(): if k not in test_dtypes: continue total += 1 if test_dtypes[k] == v: tp += 1 fn -= 1 <|reserved_special_token_0|> print('Dataset type scores:') print(f'P: {p} , R: {r} , F: {f}') <|reserved_special_token_0|> if filter_nontypes: test_atypes = {k: v for k, v in test_atypes.items() if v != ''} atypes = {k: v for k, v in atypes.items() if v != ''} <|reserved_special_token_0|> for k, v in atypes.items(): if k not in test_atypes: continue if v != '': total += 1 if test_atypes[k] == v: tp += 1 fn -= 1 elif v == 'BooleanA' and test_atypes[k] == 'NominalA': tp += 1 fn -= 1 else: print(k, v, test_atypes[k]) <|reserved_special_token_0|> print('Attribute type scores:') print(f'P: {p} , R: {r} , F: {f}') <|reserved_special_token_1|> <|reserved_special_token_0|> gtypes = {} dtypes = {} atypes = {} g = rdflib.Graph() g.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#') g.parse('./ontology.ttl', format='ttl') sleep(0.5) results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . filter ( ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet ) ) } """ ) for result in results: uri, geometry_type = result gtypes[str(uri)] = str(geometry_type).split('#')[1] results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . ?type rdfs:subClassOf+ ccd:CoreConceptDataSet . 
} """ ) for result in results: uri, dtype = result dtypes[str(uri)] = str(dtype).split('#')[1] results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?label ?type where { ?attribute ada:ofDataSet ?dataset ; skos:exactMatch ?concept ; rdfs:label ?label . optional { ?concept a ?type . ?type rdfs:subClassOf+ ccd:Attribute . } } group by ?dataset ?label ?type """ ) for result in results: dataset, label, atype = result key = str(dataset), str(label) if atype is None and key not in atypes: atypes[key] = '' elif atype is not None: atypes[key] = str(atype).split('#')[1] test_gtypes = {} test_dtypes = {} test_atypes = {} with open('./datasets/annotations_datasets.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_gtypes[row[0]] = row[1] test_dtypes[row[0]] = row[2] with open('./datasets/annotations_attributes.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_atypes[row[0], row[1]] = row[2] tp = 0 total = 0 fn = len(test_gtypes) for k, v in gtypes.items(): if k not in test_gtypes: continue total += 1 if test_gtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Geometry type scores:') print(f'P: {p} , R: {r} , F: {f}') tp = 0 total = 0 fn = len(test_dtypes) for k, v in dtypes.items(): if k not in test_dtypes: continue total += 1 if test_dtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Dataset type scores:') print(f'P: {p} , R: {r} , F: {f}') filter_nontypes = True if filter_nontypes: test_atypes = {k: v for k, v in test_atypes.items() if v != ''} atypes = {k: v for k, v in atypes.items() if v != ''} tp = 0 total = 0 fn = len(list(filter(lambda x: x != '', test_atypes.values()))) for k, v in atypes.items(): if k not in test_atypes: continue if v != '': total += 1 if test_atypes[k] == v: tp += 1 fn -= 1 elif v == 'BooleanA' and test_atypes[k] == 'NominalA': tp += 1 fn -= 1 else: print(k, v, test_atypes[k]) p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Attribute type scores:') print(f'P: {p} , R: {r} , F: {f}') <|reserved_special_token_1|> import rdflib import csv from time import sleep gtypes = {} dtypes = {} atypes = {} g = rdflib.Graph() g.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#') g.parse('./ontology.ttl', format='ttl') sleep(0.5) results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . filter ( ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet ) ) } """ ) for result in results: uri, geometry_type = result gtypes[str(uri)] = str(geometry_type).split('#')[1] results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . ?type rdfs:subClassOf+ ccd:CoreConceptDataSet . 
} """ ) for result in results: uri, dtype = result dtypes[str(uri)] = str(dtype).split('#')[1] results = g.query( """ prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?label ?type where { ?attribute ada:ofDataSet ?dataset ; skos:exactMatch ?concept ; rdfs:label ?label . optional { ?concept a ?type . ?type rdfs:subClassOf+ ccd:Attribute . } } group by ?dataset ?label ?type """ ) for result in results: dataset, label, atype = result key = str(dataset), str(label) if atype is None and key not in atypes: atypes[key] = '' elif atype is not None: atypes[key] = str(atype).split('#')[1] test_gtypes = {} test_dtypes = {} test_atypes = {} with open('./datasets/annotations_datasets.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_gtypes[row[0]] = row[1] test_dtypes[row[0]] = row[2] with open('./datasets/annotations_attributes.csv', 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_atypes[row[0], row[1]] = row[2] tp = 0 total = 0 fn = len(test_gtypes) for k, v in gtypes.items(): if k not in test_gtypes: continue total += 1 if test_gtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Geometry type scores:') print(f'P: {p} , R: {r} , F: {f}') tp = 0 total = 0 fn = len(test_dtypes) for k, v in dtypes.items(): if k not in test_dtypes: continue total += 1 if test_dtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Dataset type scores:') print(f'P: {p} , R: {r} , F: {f}') filter_nontypes = True if filter_nontypes: test_atypes = {k: v for k, v in test_atypes.items() if v != ''} atypes = {k: v for k, v in atypes.items() if v != ''} tp = 0 total = 0 fn = len(list(filter(lambda x: x != '', test_atypes.values()))) for k, v in atypes.items(): if k not in test_atypes: continue if v != '': total += 1 if test_atypes[k] == v: tp += 1 fn -= 1 elif v == 'BooleanA' and test_atypes[k] == 'NominalA': tp += 1 fn -= 1 else: print(k, v, test_atypes[k]) p = tp / total r = tp / (tp + fn) f = 2 * (p * r / (p + r)) print('Attribute type scores:') print(f'P: {p} , R: {r} , F: {f}') <|reserved_special_token_1|> import rdflib import csv from time import sleep gtypes = {} dtypes = {} atypes = {} g = rdflib.Graph() g.parse("http://geographicknowledge.de/vocab/CoreConceptData.rdf#") g.parse("./ontology.ttl", format="ttl") sleep(.5) results = g.query(""" prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . filter ( ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet ) ) } """) for result in results: uri, geometry_type = result gtypes[str(uri)] = str(geometry_type).split('#')[1] results = g.query(""" prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix dcat: <https://www.w3.org/TR/vocab-dcat#> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?type where { ?dataset a dcat:Dataset , ?type . ?type rdfs:subClassOf+ ccd:CoreConceptDataSet . 
} """) for result in results: uri, dtype = result dtypes[str(uri)] = str(dtype).split('#')[1] results = g.query(""" prefix skos: <http://www.w3.org/2004/02/skos/core#> prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#> prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf> prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?dataset ?label ?type where { ?attribute ada:ofDataSet ?dataset ; skos:exactMatch ?concept ; rdfs:label ?label . optional { ?concept a ?type . ?type rdfs:subClassOf+ ccd:Attribute . } } group by ?dataset ?label ?type """) for result in results: dataset, label, atype = result key = (str(dataset), str(label)) if atype is None and key not in atypes: atypes[key] = "" elif atype is not None: atypes[key] = str(atype).split('#')[1] test_gtypes = {} test_dtypes = {} test_atypes = {} with open("./datasets/annotations_datasets.csv", 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_gtypes[row[0]] = row[1] test_dtypes[row[0]] = row[2] with open("./datasets/annotations_attributes.csv", 'r') as fin: reader = csv.reader(fin) next(reader) for row in reader: test_atypes[(row[0],row[1])] = row[2] tp = 0 total = 0 fn = len(test_gtypes) for k, v in gtypes.items(): if k not in test_gtypes: # skip some extra test datasets continue total += 1 if test_gtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * ((p * r) / (p + r)) print("Geometry type scores:") print(f"P: {p} , R: {r} , F: {f}") tp = 0 total = 0 fn = len(test_dtypes) for k, v in dtypes.items(): if k not in test_dtypes: continue total += 1 if test_dtypes[k] == v: tp += 1 fn -= 1 p = tp / total r = tp / (tp + fn) f = 2 * ((p * r) / (p + r)) print("Dataset type scores:") print(f"P: {p} , R: {r} , F: {f}") filter_nontypes = True if filter_nontypes: test_atypes = {k: v for k, v in test_atypes.items() if v != ""} atypes = {k: v for k, v in atypes.items() if v != ""} tp = 0 total = 0 fn = len(list(filter(lambda x: x != "", test_atypes.values()))) for k, v in atypes.items(): if k not in test_atypes: continue if v != "": total += 1 if test_atypes[k] == v: tp += 1 fn -= 1 elif v == "BooleanA" and test_atypes[k] == "NominalA": # boolean is "more" correct tp += 1 fn -= 1 else: print(k, v, test_atypes[k]) p = tp / total r = tp / (tp + fn) f = 2 * ((p * r) / (p + r)) print("Attribute type scores:") print(f"P: {p} , R: {r} , F: {f}")
flexible
{ "blob_id": "eb1fbe2de3c8548175eb3c8720353e466e3b68c7", "index": 7336, "step-1": "<mask token>\n", "step-2": "<mask token>\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\n<mask token>\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\n<mask token>\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\n<mask token>\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\n<mask token>\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\n<mask token>\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\n<mask token>\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n<mask token>\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\n<mask token>\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n<mask token>\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\n<mask token>\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\n<mask token>\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n", "step-3": "<mask token>\ngtypes = {}\ndtypes = {}\natypes = {}\ng = rdflib.Graph()\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\"\n )\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\"\n )\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: 
<http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\"\n )\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != '', test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n", "step-4": "import rdflib\nimport csv\nfrom time import sleep\ngtypes = {}\ndtypes = {}\natypes = {}\ng = rdflib.Graph()\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\"\n )\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\"\n )\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n 
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\"\n )\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != '', test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n", "step-5": "import rdflib\nimport csv\n\nfrom time import sleep\n\ngtypes = {}\ndtypes = {}\natypes = {}\n\ng = rdflib.Graph()\ng.parse(\"http://geographicknowledge.de/vocab/CoreConceptData.rdf#\")\ng.parse(\"./ontology.ttl\", format=\"ttl\")\n\nsleep(.5)\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\")\n\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\")\n\nfor result 
in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\n\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\")\n\nfor result in results:\n dataset, label, atype = result\n key = (str(dataset), str(label))\n if atype is None and key not in atypes:\n atypes[key] = \"\"\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\n\n\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\n\nwith open(\"./datasets/annotations_datasets.csv\", 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\n\nwith open(\"./datasets/annotations_attributes.csv\", 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[(row[0],row[1])] = row[2]\n\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes: # skip some extra test datasets\n continue\n total += 1\n\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Geometry type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Dataset type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != \"\"}\n atypes = {k: v for k, v in atypes.items() if v != \"\"}\n\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != \"\", test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n\n if v != \"\":\n total += 1\n\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == \"BooleanA\" and test_atypes[k] == \"NominalA\": # boolean is \"more\" correct\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Attribute type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def skewness_log(df):
    df['SalePrice_New'] = np.log(df['SalePrice'])
    df['GrLivArea_New'] = np.log(df['GrLivArea'])
    skewed_slPri = skew(df['SalePrice_New'])
    skewness_grLiv = skew(df['GrLivArea_New'])
    return skewness_grLiv, skewed_slPri
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = pd.read_csv('data/train.csv')


def skewness_log(df):
    df['SalePrice_New'] = np.log(df['SalePrice'])
    df['GrLivArea_New'] = np.log(df['GrLivArea'])
    skewed_slPri = skew(df['SalePrice_New'])
    skewness_grLiv = skew(df['GrLivArea_New'])
    return skewness_grLiv, skewed_slPri
<|reserved_special_token_1|>
from scipy.stats import skew
import pandas as pd
import numpy as np
data = pd.read_csv('data/train.csv')


def skewness_log(df):
    df['SalePrice_New'] = np.log(df['SalePrice'])
    df['GrLivArea_New'] = np.log(df['GrLivArea'])
    skewed_slPri = skew(df['SalePrice_New'])
    skewness_grLiv = skew(df['GrLivArea_New'])
    return skewness_grLiv, skewed_slPri
<|reserved_special_token_1|>
# %load q03_skewness_log/build.py
from scipy.stats import skew
import pandas as pd
import numpy as np

data = pd.read_csv('data/train.csv')


# Write code here:
def skewness_log(df):
    df['SalePrice_New'] = np.log(df['SalePrice'])
    df['GrLivArea_New'] = np.log(df['GrLivArea'])
    skewed_slPri = skew(df['SalePrice_New'])
    skewness_grLiv = skew(df['GrLivArea_New'])
    return skewness_grLiv,skewed_slPri
flexible
{ "blob_id": "f5bd41f4aaff616a332d80ec44c364ffc91c58f0", "index": 265, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef skewness_log(df):\n df['SalePrice_New'] = np.log(df['SalePrice'])\n df['GrLivArea_New'] = np.log(df['GrLivArea'])\n skewed_slPri = skew(df['SalePrice_New'])\n skewness_grLiv = skew(df['GrLivArea_New'])\n return skewness_grLiv, skewed_slPri\n", "step-3": "<mask token>\ndata = pd.read_csv('data/train.csv')\n\n\ndef skewness_log(df):\n df['SalePrice_New'] = np.log(df['SalePrice'])\n df['GrLivArea_New'] = np.log(df['GrLivArea'])\n skewed_slPri = skew(df['SalePrice_New'])\n skewness_grLiv = skew(df['GrLivArea_New'])\n return skewness_grLiv, skewed_slPri\n", "step-4": "from scipy.stats import skew\nimport pandas as pd\nimport numpy as np\ndata = pd.read_csv('data/train.csv')\n\n\ndef skewness_log(df):\n df['SalePrice_New'] = np.log(df['SalePrice'])\n df['GrLivArea_New'] = np.log(df['GrLivArea'])\n skewed_slPri = skew(df['SalePrice_New'])\n skewness_grLiv = skew(df['GrLivArea_New'])\n return skewness_grLiv, skewed_slPri\n", "step-5": "# %load q03_skewness_log/build.py\nfrom scipy.stats import skew\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('data/train.csv')\n\n\n# Write code here:\ndef skewness_log(df):\n df['SalePrice_New'] = np.log(df['SalePrice'])\n df['GrLivArea_New'] = np.log(df['GrLivArea'])\n skewed_slPri = skew(df['SalePrice_New'])\n skewness_grLiv = skew(df['GrLivArea_New'])\n return skewness_grLiv,skewed_slPri\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def isHarshad(n): if n % findSum(n) == 0: return True return False def findHarshad(low, high): low = 500 high = 525 streak = 0 maxStreak = 0 for i in range(low, high + 1, 1): if isHarshad(i): streak = streak + 1 else: maxStreak = max(streak, maxStreak) streak = 0 maxStreak = max(streak, maxStreak) print(maxStreak) <|reserved_special_token_0|> <|reserved_special_token_1|> def findSum(n): s = 0 while n > 0: x = n % 10 s = s + x n = n // 10 return s def isHarshad(n): if n % findSum(n) == 0: return True return False def findHarshad(low, high): low = 500 high = 525 streak = 0 maxStreak = 0 for i in range(low, high + 1, 1): if isHarshad(i): streak = streak + 1 else: maxStreak = max(streak, maxStreak) streak = 0 maxStreak = max(streak, maxStreak) print(maxStreak) <|reserved_special_token_0|> <|reserved_special_token_1|> def findSum(n): s = 0 while n > 0: x = n % 10 s = s + x n = n // 10 return s def isHarshad(n): if n % findSum(n) == 0: return True return False def findHarshad(low, high): low = 500 high = 525 streak = 0 maxStreak = 0 for i in range(low, high + 1, 1): if isHarshad(i): streak = streak + 1 else: maxStreak = max(streak, maxStreak) streak = 0 maxStreak = max(streak, maxStreak) print(maxStreak) <|reserved_special_token_0|> for line in f: l = f.readline() h = f.readline() findHarshad(l, h) f.close() <|reserved_special_token_1|> def findSum(n): s = 0 while n > 0: x = n % 10 s = s + x n = n // 10 return s def isHarshad(n): if n % findSum(n) == 0: return True return False def findHarshad(low, high): low = 500 high = 525 streak = 0 maxStreak = 0 for i in range(low, high + 1, 1): if isHarshad(i): streak = streak + 1 else: maxStreak = max(streak, maxStreak) streak = 0 maxStreak = max(streak, maxStreak) print(maxStreak) f = open('DwiteHarshadNumbersData.txt', 'r') for line in f: l = f.readline() h = f.readline() findHarshad(l, h) f.close() <|reserved_special_token_1|> #This program is a nice example of a core algorithm #Remove Individual Digits # To remove individual digits you use two operations # 1 MOD: # mod return the remainder after division. 5%2 = 1. # If we mod by 10 we get the units digit. 723%10 = 3 # 2 Integer Division: # Integer division is when we divide and remove decimals; # we DO NOT round, simply cut them off. To integer divide # in Python we use //. For example 723//10 = 72. This is # a quick way to remove decimals. def findSum(n): s = 0 #store the sum of the values while (n > 0): x = n % 10 #chop off units digit store in x s = s + x #add unit digit to sum, stored in s n = n // 10 #remove unit digit from n return s def isHarshad(n): if (n % findSum(n) == 0): #note that if a % b == 0 b is a factor of a return True return False def findHarshad(low, high): low = 500 high = 525 streak = 0 maxStreak = 0 for i in range(low,high + 1,1): if (isHarshad(i)): streak = streak + 1; else: maxStreak = max(streak,maxStreak) streak = 0; #print(i,streak) #Test code for debugging maxStreak = max(streak,maxStreak) print(maxStreak) f = open("DwiteHarshadNumbersData.txt", "r") #Python short cut which loops as long as there is a new line in the file for line in f: l = f.readline() h = f.readline() findHarshad(l,h) f.close()
flexible
{ "blob_id": "2a95a68d8570a314b2b6e5731d7a695e5d7e7b30", "index": 6261, "step-1": "<mask token>\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\n", "step-2": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\n", "step-3": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\nfor line in f:\n l = f.readline()\n h = f.readline()\n findHarshad(l, h)\nf.close()\n", "step-4": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\nf = open('DwiteHarshadNumbersData.txt', 'r')\nfor line in f:\n l = f.readline()\n h = f.readline()\n findHarshad(l, h)\nf.close()\n", "step-5": "#This program is a nice example of a core algorithm\n#Remove Individual Digits\n# To remove individual digits you use two operations\n# 1 MOD:\n\t# mod return the remainder after division. 5%2 = 1.\n\t# If we mod by 10 we get the units digit. 723%10 = 3\n# 2 Integer Division:\n#\tInteger division is when we divide and remove decimals;\n#\twe DO NOT round, simply cut them off. To integer divide \n# \tin Python we use //. For example 723//10 = 72. This is \n# \ta quick way to remove decimals. 
\n\n\ndef findSum(n):\n\ts = 0\t#store the sum of the values\n\twhile (n > 0):\n\t\tx = n % 10 #chop off units digit store in x\n\t\ts = s + x #add unit digit to sum, stored in s\n\t\tn = n // 10 #remove unit digit from n\n\t\t\n\treturn s\n\ndef isHarshad(n):\n\n\tif (n % findSum(n) == 0): #note that if a % b == 0 b is a factor of a\n\t\treturn True\n\treturn False\n\n\ndef findHarshad(low, high):\n\tlow = 500\n\thigh = 525\n\tstreak = 0\n\tmaxStreak = 0\n\n\tfor i in range(low,high + 1,1):\n\t\tif (isHarshad(i)):\n\t\t\tstreak = streak + 1;\n\t\telse:\n\t\t\tmaxStreak = max(streak,maxStreak)\n\t\t\tstreak = 0;\n\t\t#print(i,streak) #Test code for debugging\n\n\tmaxStreak = max(streak,maxStreak)\n\tprint(maxStreak)\n\nf = open(\"DwiteHarshadNumbersData.txt\", \"r\")\n#Python short cut which loops as long as there is a new line in the file\nfor line in f:\n\tl = f.readline()\n\th = f.readline()\n\tfindHarshad(l,h)\n\nf.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in new:
    n[i] = s.count(i)
<|reserved_special_token_0|>
for k, v in n.items():
    cnt.append(v)
if cnt.count(max(cnt)) > 1:
    print('?')
else:
    print(max(n, key=n.get))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = list(input().upper())
new = list(set(s))
n = {}
for i in new:
    n[i] = s.count(i)
cnt = deque()
for k, v in n.items():
    cnt.append(v)
if cnt.count(max(cnt)) > 1:
    print('?')
else:
    print(max(n, key=n.get))
<|reserved_special_token_1|>
from collections import deque
s = list(input().upper())
new = list(set(s))
n = {}
for i in new:
    n[i] = s.count(i)
cnt = deque()
for k, v in n.items():
    cnt.append(v)
if cnt.count(max(cnt)) > 1:
    print('?')
else:
    print(max(n, key=n.get))
<|reserved_special_token_1|>
from collections import deque
s = list(input().upper())
new = list(set(s)) # count with the deduplicated list of letters to avoid exceeding the time limit
n = {}
for i in new:
    n[i] = s.count(i)

cnt = deque()
for k, v in n.items():
    cnt.append(v)

if cnt.count(max(cnt)) >1:
    print('?')
else:
    print(max(n, key=n.get))
flexible
{ "blob_id": "5dcb20f52b5041d5f9ea028b383e0f2f10104af9", "index": 9486, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in new:\n n[i] = s.count(i)\n<mask token>\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n", "step-3": "<mask token>\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n", "step-4": "from collections import deque\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n", "step-5": "from collections import deque\ns = list(input().upper())\nnew = list(set(s)) # 중복 제거 한 알파벳 리스트로 카운트 해줘야 시간초과 안남\nn = {}\nfor i in new:\n n[i] = s.count(i)\n\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\n\nif cnt.count(max(cnt)) >1:\n print('?')\nelse:\n print(max(n, key=n.get))\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os import sys import json from subprocess import Popen, PIPE, STDOUT from twisted.internet.task import deferLater from twisted.internet import reactor from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS from utils import rsync # TODO: Add Twisted logger # TODO: Create plugin for fileserver (using twistd) # TODO: Thinking about using SSL over my WebSockets message-based protocol (OR using AES algorithm?) CONFIG_IP = 'localhost' CONFIG_PORT = 8888 CONFIG_TEMPLATE = '' CONFIG_DATA = {} BATCH_SIZE = 1 * 2 ** 20 def sendPrefences(port): p = Popen(["python", "./utils/preferences_sender.py", str(CONFIG_TEMPLATE), str(port)], stdout=PIPE, stdin=PIPE, stderr=STDOUT) result = p.communicate()[0] class MessageBasedServerProtocol(WebSocketServerProtocol): """ Message-based WebSockets server Template contains some parts as string: [USER_ID:OPERATION_NAME:FILE_ID:FILE_ENC_PASSWORD] - 15 symbols for USER_ID, 10 symbols for OPERATION_NAME, 25 symbols for FILE_ID 32 symbols for FILE_ENC_PASSWORD other - some data """ def __init__(self): path = CONFIG_DATA['path'] base_dir = CONFIG_DATA['base_dir'] # prepare to working with files... if os.path.exists(path) and os.path.isdir(path): os.chdir(path) if not os.path.exists(base_dir) or not os.path.isdir(base_dir): os.mkdir(base_dir) os.chdir(base_dir) else: os.mkdir(path) os.chdir(path) os.mkdir(base_dir) os.chdir(base_dir) # init some things self.fullpath = path + '/' + base_dir self.status = 'ONLINE' self.commands_handlers = self.__initHandlersUser() self.file_1 = self.file_2 = self.delta_sync = None self.file_enc_psw = None def __initHandlersUser(self): """ Initialize handlers for every command """ handlers = {} handlers['WRITE_FILE'] = self.write_file handlers['READU_FILE'] = self.read_file handlers['DELET_FILE'] = self.delete_file handlers['STATUS_SRV'] = self.status_server handlers['RSYNC_FILE'] = self.rsync_file handlers['WSYNC_FILE'] = self.wsync_file return handlers def __checkUserCatalog(self, user_id): # prepare to working with files... os.chdir(self.fullpath) if not os.path.exists(user_id) or not os.path.isdir(user_id): os.mkdir(user_id) os.chdir(user_id) else: os.chdir(self.fullpath + '/' + user_id) def __get_standart_states(self): return "C", 'Succesfull!' def write_file(self, user_id, file_id, data): print "[USER] User with %s was write a file..." % (self.transport.getPeer()) status, commentary = self.__get_standart_states() self.__checkUserCatalog(user_id) self.status = 'BUSY' operation = "WRT" try: f = open(file_id, "wb") f.write(data) except IOError, argument: status = "E" commentary = argument except Exception, argument: status = "E" commentary = argument raise Exception(argument) finally: f.close() self.status = 'ONLINE' return operation, status, commentary def read_file(self, user_id, file_id, data): print "[USER] User with %s was read a file..." % (self.transport.getPeer()) status, commentary = self.__get_standart_states() self.__checkUserCatalog(user_id) self.status = 'BUSY' operation = "REA" try: f = open(file_id, "rb") commentary = f.read() except IOError, argument: status = "E" commentary = argument except Exception, argument: status = "E" commentary = argument raise Exception(argument) finally: f.close() self.status = 'ONLINE' return operation, status, commentary def delete_file(self, user_id, file_id, data): print "[USER] User with %s was delete a file..." 
% (self.transport.getPeer()) status, commentary = self.__get_standart_states() self.__checkUserCatalog(user_id) self.status = 'BUSY' operation = "DEL" try: os.remove(file_id) except IOError, argument: status = "E" commentary = argument except Exception, argument: status = "E" commentary = argument raise Exception(argument) self.status = 'ONLINE' return operation, status, commentary def rsync_file(self, user_id, file_id, data): print "[USER] User with %s sync files..." % (self.transport.getPeer()) status, commentary = self.__get_standart_states() self.__checkUserCatalog(user_id) self.status = 'BUSY' operation = "RSY" try: f = open(file_id, "rb") commentary = f.read() except IOError, argument: status = "E" commentary = argument except Exception, argument: status = "E" commentary = argument raise Exception(argument) self.status = 'ONLINE' return operation, status, commentary def wsync_file(self, user_id, file_id, data): print "[USER] User with %s sync files..." % (self.transport.getPeer()) status, commentary = self.__get_standart_states() self.__checkUserCatalog(user_id) self.status = 'BUSY' operation = "WRT" try: unpatched = open(file_id, "rb") hashes = rsync.blockchecksums(unpatched) new_file = file_id + '.new' swap_path = file_id + '~' with open(swap_path, "wb") as out_file: out_file.write(data) patchedfile = open(swap_path, "rb") delta = rsync.rsyncdelta(patchedfile, hashes) unpatched.seek(0) save_to = open(new_file, "wb") rsync.patchstream(unpatched, save_to, delta) save_to.close() patchedfile.close() unpatched.close() if os.path.exists(file_id): os.remove(file_id) os.rename(new_file, file_id) if os.path.exists(swap_path): os.remove(swap_path) except IOError, argument: status = "E" commentary = argument except Exception, argument: status = "E" commentary = argument raise Exception(argument) finally: print 'WSYNC was ended successfully!' self.status = 'ONLINE' return operation, status, commentary def status_server(self, user_id, file_id, data): print "[SERV] Server with %s getting fileserver status..." % (self.transport.getPeer()) status = "C" operation = "STS" commentary = self.status return operation, status, commentary def onOpen(self): print "[USER] User with %s connected" % (self.transport.getPeer()) def connectionLost(self, reason): print '[USER] Lost connection from %s' % (self.transport.getPeer()) def onMessage(self, payload, isBinary): """ Processing request from user and send response """ user_id, cmd, file_id, self.file_enc_psw = payload[:87].replace('[', '').replace(']', '').split(':') self.file_enc_psw = self.file_enc_psw.replace('~', '') data = payload[87:] operation, status, commentary = "UNK", "C", "Successfull!" if cmd in ('WRITE_FILE', 'READU_FILE', 'DELET_FILE', 'STATUS_SRV', 'RSYNC_FILE', 'WSYNC_FILE'): operation, status, commentary = self.commands_handlers[cmd](user_id, file_id, data) self.file_enc_psw = None self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True, sync=True) if __name__ == '__main__': if len(sys.argv) < 3: print "using python fileserver_client.py [PATH_TO_config.json_FILE] [PORT]" else: try: # read config file CONFIG_TEMPLATE = sys.argv[1] with open(CONFIG_TEMPLATE, "r") as f: CONFIG_DATA = json.load(f) # checking IP and PORT CONFIG_PORT = int(sys.argv[2]) except ValueError: print 'PLEASE, enter correct information about server...' 
sys.exit(1) except Exception, e: print e sys.exit(1) if CONFIG_IP == 'localhost': CONFIG_IP = '127.0.0.1' server_addr = "ws://%s:%d" % (CONFIG_IP, CONFIG_PORT) # create server factory = WebSocketServerFactory(server_addr) factory.protocol = MessageBasedServerProtocol listenWS(factory) # create special Deffered, which sending our server prefences (ip and port) to main server if bool(CONFIG_DATA["debug"]) is False: d = deferLater(reactor, 0, sendPrefences, CONFIG_PORT) reactor.run()
normal
{ "blob_id": "30251b7c2ce30b7fa899a5885707c078788d0106", "index": 1956, "step-1": "import os\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom twisted.internet.task import deferLater\nfrom twisted.internet import reactor\nfrom autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS\n\nfrom utils import rsync\n\n# TODO: Add Twisted logger\n# TODO: Create plugin for fileserver (using twistd)\n# TODO: Thinking about using SSL over my WebSockets message-based protocol (OR using AES algorithm?)\n\nCONFIG_IP = 'localhost'\nCONFIG_PORT = 8888\nCONFIG_TEMPLATE = ''\nCONFIG_DATA = {}\nBATCH_SIZE = 1 * 2 ** 20\n\n\ndef sendPrefences(port):\n p = Popen([\"python\", \"./utils/preferences_sender.py\", str(CONFIG_TEMPLATE), str(port)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n result = p.communicate()[0]\n\n\nclass MessageBasedServerProtocol(WebSocketServerProtocol):\n \"\"\"\n Message-based WebSockets server\n Template contains some parts as string:\n [USER_ID:OPERATION_NAME:FILE_ID:FILE_ENC_PASSWORD] - 15 symbols for USER_ID,\n 10 symbols for OPERATION_NAME,\n 25 symbols for FILE_ID\n 32 symbols for FILE_ENC_PASSWORD\n other - some data\n \"\"\"\n\n def __init__(self):\n path = CONFIG_DATA['path']\n base_dir = CONFIG_DATA['base_dir']\n # prepare to working with files...\n if os.path.exists(path) and os.path.isdir(path):\n os.chdir(path)\n if not os.path.exists(base_dir) or not os.path.isdir(base_dir):\n os.mkdir(base_dir)\n os.chdir(base_dir)\n else:\n os.mkdir(path)\n os.chdir(path)\n os.mkdir(base_dir)\n os.chdir(base_dir)\n # init some things\n self.fullpath = path + '/' + base_dir\n self.status = 'ONLINE'\n self.commands_handlers = self.__initHandlersUser()\n self.file_1 = self.file_2 = self.delta_sync = None\n self.file_enc_psw = None\n\n def __initHandlersUser(self):\n \"\"\"\n Initialize handlers for every command\n \"\"\"\n handlers = {}\n handlers['WRITE_FILE'] = self.write_file\n handlers['READU_FILE'] = self.read_file\n handlers['DELET_FILE'] = self.delete_file\n handlers['STATUS_SRV'] = self.status_server\n handlers['RSYNC_FILE'] = self.rsync_file\n handlers['WSYNC_FILE'] = self.wsync_file\n return handlers\n\n def __checkUserCatalog(self, user_id):\n # prepare to working with files...\n os.chdir(self.fullpath)\n if not os.path.exists(user_id) or not os.path.isdir(user_id):\n os.mkdir(user_id)\n os.chdir(user_id)\n else:\n os.chdir(self.fullpath + '/' + user_id)\n\n def __get_standart_states(self):\n return \"C\", 'Succesfull!'\n\n def write_file(self, user_id, file_id, data):\n print \"[USER] User with %s was write a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"WRT\"\n try:\n f = open(file_id, \"wb\")\n f.write(data)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n finally:\n f.close()\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def read_file(self, user_id, file_id, data):\n print \"[USER] User with %s was read a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"REA\"\n try:\n f = open(file_id, \"rb\")\n commentary = f.read()\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = 
argument\n raise Exception(argument)\n finally:\n f.close()\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def delete_file(self, user_id, file_id, data):\n print \"[USER] User with %s was delete a file...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"DEL\"\n try:\n os.remove(file_id)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def rsync_file(self, user_id, file_id, data):\n print \"[USER] User with %s sync files...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"RSY\"\n try:\n f = open(file_id, \"rb\")\n commentary = f.read()\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def wsync_file(self, user_id, file_id, data):\n print \"[USER] User with %s sync files...\" % (self.transport.getPeer())\n status, commentary = self.__get_standart_states()\n self.__checkUserCatalog(user_id)\n self.status = 'BUSY'\n operation = \"WRT\"\n try:\n unpatched = open(file_id, \"rb\")\n hashes = rsync.blockchecksums(unpatched)\n\n new_file = file_id + '.new'\n swap_path = file_id + '~'\n with open(swap_path, \"wb\") as out_file:\n out_file.write(data)\n\n patchedfile = open(swap_path, \"rb\")\n delta = rsync.rsyncdelta(patchedfile, hashes)\n\n unpatched.seek(0)\n save_to = open(new_file, \"wb\")\n rsync.patchstream(unpatched, save_to, delta)\n\n save_to.close()\n patchedfile.close()\n unpatched.close()\n\n if os.path.exists(file_id):\n os.remove(file_id)\n\n os.rename(new_file, file_id)\n\n if os.path.exists(swap_path):\n os.remove(swap_path)\n except IOError, argument:\n status = \"E\"\n commentary = argument\n except Exception, argument:\n status = \"E\"\n commentary = argument\n raise Exception(argument)\n finally:\n print 'WSYNC was ended successfully!'\n self.status = 'ONLINE'\n return operation, status, commentary\n\n def status_server(self, user_id, file_id, data):\n print \"[SERV] Server with %s getting fileserver status...\" % (self.transport.getPeer())\n status = \"C\"\n operation = \"STS\"\n commentary = self.status\n return operation, status, commentary\n\n def onOpen(self):\n print \"[USER] User with %s connected\" % (self.transport.getPeer())\n\n def connectionLost(self, reason):\n print '[USER] Lost connection from %s' % (self.transport.getPeer())\n\n def onMessage(self, payload, isBinary):\n \"\"\"\n Processing request from user and send response\n \"\"\"\n user_id, cmd, file_id, self.file_enc_psw = payload[:87].replace('[', '').replace(']', '').split(':')\n self.file_enc_psw = self.file_enc_psw.replace('~', '')\n data = payload[87:]\n operation, status, commentary = \"UNK\", \"C\", \"Successfull!\"\n if cmd in ('WRITE_FILE', 'READU_FILE', 'DELET_FILE', 'STATUS_SRV', 'RSYNC_FILE', 'WSYNC_FILE'):\n operation, status, commentary = self.commands_handlers[cmd](user_id, file_id, data)\n self.file_enc_psw = None\n self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True, sync=True)\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print \"using python 
fileserver_client.py [PATH_TO_config.json_FILE] [PORT]\"\n else:\n try:\n # read config file\n CONFIG_TEMPLATE = sys.argv[1]\n with open(CONFIG_TEMPLATE, \"r\") as f:\n CONFIG_DATA = json.load(f)\n # checking IP and PORT\n CONFIG_PORT = int(sys.argv[2])\n except ValueError:\n print 'PLEASE, enter correct information about server...'\n sys.exit(1)\n except Exception, e:\n print e\n sys.exit(1)\n if CONFIG_IP == 'localhost':\n CONFIG_IP = '127.0.0.1'\n server_addr = \"ws://%s:%d\" % (CONFIG_IP, CONFIG_PORT)\n # create server\n factory = WebSocketServerFactory(server_addr)\n factory.protocol = MessageBasedServerProtocol\n listenWS(factory)\n # create special Deffered, which sending our server prefences (ip and port) to main server\n if bool(CONFIG_DATA[\"debug\"]) is False:\n d = deferLater(reactor, 0, sendPrefences, CONFIG_PORT)\n reactor.run()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import json
from django.db import models
from django.conf import settings
from django.core.serializers import serialize

# Create your models here.

def upload_updated_image(instance,filename):
    return '/MyApi/{user}/{filename}'.format(user=instance.user,filename=filename)

class UpdateQueryset(models.QuerySet):
    def serialize(self):
        # dot value method
        list_value=list(self.values("user","id","name","content","image"))
        return json.dumps(list_value)

class UpdateManager(models.Manager):
    def get_queryset(self):
        return UpdateQueryset(self.model,using=self.db)

class CRUD(models.Model):
    user =models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    name =models.TextField(blank=True,null=True)
    content =models.TextField(blank=True,null=True)
    image =models.ImageField(upload_to=upload_updated_image,null=True,blank=True)
    updated =models.DateTimeField(auto_now=True)
    timestamp =models.DateTimeField(auto_now_add=True)

    # This is modellistview
    objects=UpdateManager()

    def __str__(self):
        return self.name or ""

    #This is for modeldetailview
    def serialize(self):
        try:
            image=self.image.url
        except:
            image=""
        data={
            "user":self.user.id,
            "id":self.id,
            "name":self.name,
            "content":self.content,
            "image":image
        }

        return json.dumps(data)
normal
{ "blob_id": "5749f30d1a1efd5404654d755bca4515adcf4bca", "index": 1810, "step-1": "<mask token>\n\n\nclass CRUD(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n name = models.TextField(blank=True, null=True)\n content = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to=upload_updated_image, null=True,\n blank=True)\n updated = models.DateTimeField(auto_now=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n objects = UpdateManager()\n\n def __str__(self):\n return self.name or ''\n\n def serialize(self):\n try:\n image = self.image.url\n except:\n image = ''\n data = {'user': self.user.id, 'id': self.id, 'name': self.name,\n 'content': self.content, 'image': image}\n return json.dumps(data)\n", "step-2": "<mask token>\n\n\nclass UpdateQueryset(models.QuerySet):\n\n def serialize(self):\n list_value = list(self.values('user', 'id', 'name', 'content', 'image')\n )\n return json.dumps(list_value)\n\n\nclass UpdateManager(models.Manager):\n\n def get_queryset(self):\n return UpdateQueryset(self.model, using=self.db)\n\n\nclass CRUD(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n name = models.TextField(blank=True, null=True)\n content = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to=upload_updated_image, null=True,\n blank=True)\n updated = models.DateTimeField(auto_now=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n objects = UpdateManager()\n\n def __str__(self):\n return self.name or ''\n\n def serialize(self):\n try:\n image = self.image.url\n except:\n image = ''\n data = {'user': self.user.id, 'id': self.id, 'name': self.name,\n 'content': self.content, 'image': image}\n return json.dumps(data)\n", "step-3": "<mask token>\n\n\ndef upload_updated_image(instance, filename):\n return '/MyApi/{user}/{filename}'.format(user=instance.user, filename=\n filename)\n\n\nclass UpdateQueryset(models.QuerySet):\n\n def serialize(self):\n list_value = list(self.values('user', 'id', 'name', 'content', 'image')\n )\n return json.dumps(list_value)\n\n\nclass UpdateManager(models.Manager):\n\n def get_queryset(self):\n return UpdateQueryset(self.model, using=self.db)\n\n\nclass CRUD(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n name = models.TextField(blank=True, null=True)\n content = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to=upload_updated_image, null=True,\n blank=True)\n updated = models.DateTimeField(auto_now=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n objects = UpdateManager()\n\n def __str__(self):\n return self.name or ''\n\n def serialize(self):\n try:\n image = self.image.url\n except:\n image = ''\n data = {'user': self.user.id, 'id': self.id, 'name': self.name,\n 'content': self.content, 'image': image}\n return json.dumps(data)\n", "step-4": "import json\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.serializers import serialize\n\n\ndef upload_updated_image(instance, filename):\n return '/MyApi/{user}/{filename}'.format(user=instance.user, filename=\n filename)\n\n\nclass UpdateQueryset(models.QuerySet):\n\n def serialize(self):\n list_value = list(self.values('user', 'id', 'name', 'content', 'image')\n )\n return json.dumps(list_value)\n\n\nclass UpdateManager(models.Manager):\n\n def get_queryset(self):\n return UpdateQueryset(self.model, using=self.db)\n\n\nclass 
CRUD(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n name = models.TextField(blank=True, null=True)\n content = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to=upload_updated_image, null=True,\n blank=True)\n updated = models.DateTimeField(auto_now=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n objects = UpdateManager()\n\n def __str__(self):\n return self.name or ''\n\n def serialize(self):\n try:\n image = self.image.url\n except:\n image = ''\n data = {'user': self.user.id, 'id': self.id, 'name': self.name,\n 'content': self.content, 'image': image}\n return json.dumps(data)\n", "step-5": "import json\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.serializers import serialize\n\n# Create your models here.\n\ndef upload_updated_image(instance,filename):\n return '/MyApi/{user}/{filename}'.format(user=instance.user,filename=filename)\n\nclass UpdateQueryset(models.QuerySet):\n def serialize(self):\n # dot value method\n list_value=list(self.values(\"user\",\"id\",\"name\",\"content\",\"image\")) \n return json.dumps(list_value)\n\nclass UpdateManager(models.Manager):\n def get_queryset(self):\n return UpdateQueryset(self.model,using=self.db)\n\nclass CRUD(models.Model):\n user =models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\n name =models.TextField(blank=True,null=True)\n content =models.TextField(blank=True,null=True)\n image =models.ImageField(upload_to=upload_updated_image,null=True,blank=True)\n updated =models.DateTimeField(auto_now=True)\n timestamp =models.DateTimeField(auto_now_add=True)\n \n # This is modellistview\n objects=UpdateManager()\n\n def __str__(self):\n return self.name or \"\"\n\n \n #This is for modeldetailview\n def serialize(self):\n try:\n image=self.image.url\n except:\n image=\"\"\n data={\n \"user\":self.user.id,\n \"id\":self.id,\n \"name\":self.name,\n \"content\":self.content,\n \"image\":image\n }\n\n return json.dumps(data)\n", "step-ids": [ 4, 8, 9, 10, 11 ] }
[ 4, 8, 9, 10, 11 ]
from flask import Flask, render_template from flask_ask import Ask, statement, question, session import reverse_geocoder as rg from geopy import distance from geopy.geocoders import Nominatim import requests import time ''' :::::::: ::::::::: ::: :::::::: :::::::::: ::: ::: ::: ::: ::: ::: ::: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +#++:++#++ +#++:++#+ +#++:++#++: +#+ +#++:++# +#+ +:+ +#+ +#++:++#++: +#+ +#++:++ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ #+# #+# #+# #+# #+# #+# #+# #+# #+#+# #+#+# #+# #+# #+# #+# #+# ######## ### ### ### ######## ########## ### ### ### ### ########## ### ### ### ''' app = Flask(__name__) ask = Ask(app, "/space_walk") def find_ordinals(city, iss): ''' Take tuple coordinates (lat, lon) for City and ISS and find the cardinal direction of NE, SE, SW, NW ''' if iss[0] - city[0] > 0: a = 'North' else: a = 'South' if iss[1] - city[1] > 0: b = 'East' else: b = 'West' return ''.join([a, b]) def where_is_the_iss_now(): iss_now_website = 'http://api.open-notify.org/iss-now.json' webby = requests.get(iss_now_website) data = webby.json() if data['iss_position']: longitude = data['iss_position'].get('longitude') latitude = data['iss_position'].get('latitude') results = rg.search((latitude, longitude), mode=1) lat, lon, name, admin1, admin2, cc = results[0].values() ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(latitude), float(longitude))) country_cc = requests.get( 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json') country_cc = country_cc.json() iss_coordinates = (latitude, longitude) k_nearest_coordinates = (lat, lon) distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates).miles country_name = '' for i in filter(lambda d: d.get('Code') == cc, country_cc): country_name = i.get('Name') location_text = ', '.join([name, admin1, country_name]) if distance_miles > 150: answer = 'The International Space Station is {} miles {} off the coast of {}'.format(int(distance_miles), ordinal, location_text) else: answer = 'the International Space Station is {} miles {} near {}'.format(int(distance_miles),ordinal, location_text) return answer, latitude, longitude, distance_miles, ordinal, name, admin1, country_name @app.route('/') def homepage(): return '' @ask.launch def start_skill(): # welcome_message = 'Welcome to the Fleet Feet Journal! What is your name?' 
welcome_message_reprompt = render_template('welcome_message_reprompt') welcome_message = render_template('welcome_message') return (question(welcome_message).reprompt(welcome_message_reprompt)) @ask.intent('YourLocation') def pass_over(my_location): geolocator = Nominatim(user_agent='my-application') print(my_location) location = geolocator.geocode(my_location,language='en-US') try: city = location.address.split(',')[0] state = location.address.split(',')[2] country = location.address.split(',')[-1] location_name = ', '.join([city, state, country]) except IndexError: location_name = location.address.split(',')[-1] fly_over = requests.get( 'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(location.latitude, location.longitude)) fly_over = fly_over.json() if fly_over['message'] == 'success': rise = fly_over['response'][0] answer = time.strftime('%A, %B %d, %Y at %I:%M %p GMT', time.localtime(rise.get('risetime'))) a = rise.get('risetime') # last epoch recorded b = time.time() # current epoch time c = a - b # returns seconds hours = c // 3600 % 24 minutes = c // 60 % 60 minutes = int(minutes) hours = int(hours) if minutes == 1: minorminutes = 'minute' else: minorminutes = 'minutes' if hours == 1: hour_or_hours = 'hour' else: hour_or_hours = 'hours' if hours == 0: time_til_rise = "{} {}".format(minutes, minorminutes) else: time_til_rise = "{} {} and {} {}".format(hours, hour_or_hours, minutes, minorminutes) else: answer = "failure" return statement('the next flyover for {} will begin in {} on {}'.format(location_name, time_til_rise, answer)) @ask.intent('WhereISS') def share_location(): iss_location, latitude, longitude, distance_miles, ordinal, name, admin1, country_name= where_is_the_iss_now() latitude, longitude, distance_miles = float(latitude), float(longitude), float(distance_miles) return statement(iss_location).standard_card( title="Location of the International Space Station", text='Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}'.format(round(latitude,2), round(longitude,2), round(distance_miles,0), ordinal, name, admin1, country_name)) @ask.intent('AMAZON.FallbackIntent') def fallback(): to_continue = render_template('to_continue') return question('Sorry, I am not sure what you asked me...{}'.format(to_continue)) @ask.intent('AMAZON.NavigateHomeIntent') def go_home(): return question('et - phone home') @ask.intent('AMAZON.HelpIntent') def help_me(): help_me_text = render_template('help') return question(help_me_text) @ask.intent('Credits') def speak_credits(): credits_ = render_template('credits') return statement(credits_) @ask.intent('AMAZON.StopIntent') def stop(): bye_text = render_template('bye') return statement(bye_text) @ask.intent('AMAZON.CancelIntent') def cancel(): bye_text = render_template('bye') return statement(bye_text) @ask.session_ended def session_ended(): return "{}", 200 if __name__ == '__main__': app.run(debug=True)
normal
{ "blob_id": "726f133bcf592315c42f8701be8308422ffbf0d9", "index": 426, "step-1": "<mask token>\n\n\ndef where_is_the_iss_now():\n iss_now_website = 'http://api.open-notify.org/iss-now.json'\n webby = requests.get(iss_now_website)\n data = webby.json()\n if data['iss_position']:\n longitude = data['iss_position'].get('longitude')\n latitude = data['iss_position'].get('latitude')\n results = rg.search((latitude, longitude), mode=1)\n lat, lon, name, admin1, admin2, cc = results[0].values()\n ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(\n latitude), float(longitude)))\n country_cc = requests.get(\n 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json'\n )\n country_cc = country_cc.json()\n iss_coordinates = latitude, longitude\n k_nearest_coordinates = lat, lon\n distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates\n ).miles\n country_name = ''\n for i in filter(lambda d: d.get('Code') == cc, country_cc):\n country_name = i.get('Name')\n location_text = ', '.join([name, admin1, country_name])\n if distance_miles > 150:\n answer = (\n 'The International Space Station is {} miles {} off the coast of {}'\n .format(int(distance_miles), ordinal, location_text))\n else:\n answer = ('the International Space Station is {} miles {} near {}'.\n format(int(distance_miles), ordinal, location_text))\n return (answer, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name)\n\n\[email protected]('/')\ndef homepage():\n return ''\n\n\[email protected]\ndef start_skill():\n welcome_message_reprompt = render_template('welcome_message_reprompt')\n welcome_message = render_template('welcome_message')\n return question(welcome_message).reprompt(welcome_message_reprompt)\n\n\n<mask token>\n\n\[email protected]('WhereISS')\ndef share_location():\n (iss_location, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name) = where_is_the_iss_now()\n latitude, longitude, distance_miles = float(latitude), float(longitude\n ), float(distance_miles)\n return statement(iss_location).standard_card(title=\n 'Location of the International Space Station', text=\n \"\"\"Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}\"\"\".\n format(round(latitude, 2), round(longitude, 2), round(\n distance_miles, 0), ordinal, name, admin1, country_name))\n\n\[email protected]('AMAZON.FallbackIntent')\ndef fallback():\n to_continue = render_template('to_continue')\n return question('Sorry, I am not sure what you asked me...{}'.format(\n to_continue))\n\n\n<mask token>\n\n\[email protected]('AMAZON.HelpIntent')\ndef help_me():\n help_me_text = render_template('help')\n return question(help_me_text)\n\n\n<mask token>\n\n\[email protected]('AMAZON.StopIntent')\ndef stop():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]('AMAZON.CancelIntent')\ndef cancel():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef where_is_the_iss_now():\n iss_now_website = 'http://api.open-notify.org/iss-now.json'\n webby = requests.get(iss_now_website)\n data = webby.json()\n if data['iss_position']:\n longitude = data['iss_position'].get('longitude')\n latitude = data['iss_position'].get('latitude')\n results = rg.search((latitude, longitude), mode=1)\n lat, lon, name, admin1, admin2, cc = results[0].values()\n ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(\n latitude), float(longitude)))\n 
country_cc = requests.get(\n 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json'\n )\n country_cc = country_cc.json()\n iss_coordinates = latitude, longitude\n k_nearest_coordinates = lat, lon\n distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates\n ).miles\n country_name = ''\n for i in filter(lambda d: d.get('Code') == cc, country_cc):\n country_name = i.get('Name')\n location_text = ', '.join([name, admin1, country_name])\n if distance_miles > 150:\n answer = (\n 'The International Space Station is {} miles {} off the coast of {}'\n .format(int(distance_miles), ordinal, location_text))\n else:\n answer = ('the International Space Station is {} miles {} near {}'.\n format(int(distance_miles), ordinal, location_text))\n return (answer, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name)\n\n\[email protected]('/')\ndef homepage():\n return ''\n\n\[email protected]\ndef start_skill():\n welcome_message_reprompt = render_template('welcome_message_reprompt')\n welcome_message = render_template('welcome_message')\n return question(welcome_message).reprompt(welcome_message_reprompt)\n\n\n<mask token>\n\n\[email protected]('WhereISS')\ndef share_location():\n (iss_location, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name) = where_is_the_iss_now()\n latitude, longitude, distance_miles = float(latitude), float(longitude\n ), float(distance_miles)\n return statement(iss_location).standard_card(title=\n 'Location of the International Space Station', text=\n \"\"\"Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}\"\"\".\n format(round(latitude, 2), round(longitude, 2), round(\n distance_miles, 0), ordinal, name, admin1, country_name))\n\n\[email protected]('AMAZON.FallbackIntent')\ndef fallback():\n to_continue = render_template('to_continue')\n return question('Sorry, I am not sure what you asked me...{}'.format(\n to_continue))\n\n\[email protected]('AMAZON.NavigateHomeIntent')\ndef go_home():\n return question('et - phone home')\n\n\[email protected]('AMAZON.HelpIntent')\ndef help_me():\n help_me_text = render_template('help')\n return question(help_me_text)\n\n\n<mask token>\n\n\[email protected]('AMAZON.StopIntent')\ndef stop():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]('AMAZON.CancelIntent')\ndef cancel():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef where_is_the_iss_now():\n iss_now_website = 'http://api.open-notify.org/iss-now.json'\n webby = requests.get(iss_now_website)\n data = webby.json()\n if data['iss_position']:\n longitude = data['iss_position'].get('longitude')\n latitude = data['iss_position'].get('latitude')\n results = rg.search((latitude, longitude), mode=1)\n lat, lon, name, admin1, admin2, cc = results[0].values()\n ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(\n latitude), float(longitude)))\n country_cc = requests.get(\n 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json'\n )\n country_cc = country_cc.json()\n iss_coordinates = latitude, longitude\n k_nearest_coordinates = lat, lon\n distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates\n ).miles\n country_name = ''\n for i in filter(lambda d: d.get('Code') == cc, country_cc):\n country_name = i.get('Name')\n location_text = ', '.join([name, admin1, country_name])\n if 
distance_miles > 150:\n answer = (\n 'The International Space Station is {} miles {} off the coast of {}'\n .format(int(distance_miles), ordinal, location_text))\n else:\n answer = ('the International Space Station is {} miles {} near {}'.\n format(int(distance_miles), ordinal, location_text))\n return (answer, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name)\n\n\[email protected]('/')\ndef homepage():\n return ''\n\n\[email protected]\ndef start_skill():\n welcome_message_reprompt = render_template('welcome_message_reprompt')\n welcome_message = render_template('welcome_message')\n return question(welcome_message).reprompt(welcome_message_reprompt)\n\n\[email protected]('YourLocation')\ndef pass_over(my_location):\n geolocator = Nominatim(user_agent='my-application')\n print(my_location)\n location = geolocator.geocode(my_location, language='en-US')\n try:\n city = location.address.split(',')[0]\n state = location.address.split(',')[2]\n country = location.address.split(',')[-1]\n location_name = ', '.join([city, state, country])\n except IndexError:\n location_name = location.address.split(',')[-1]\n fly_over = requests.get(\n 'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(\n location.latitude, location.longitude))\n fly_over = fly_over.json()\n if fly_over['message'] == 'success':\n rise = fly_over['response'][0]\n answer = time.strftime('%A, %B %d, %Y at %I:%M %p GMT', time.\n localtime(rise.get('risetime')))\n a = rise.get('risetime')\n b = time.time()\n c = a - b\n hours = c // 3600 % 24\n minutes = c // 60 % 60\n minutes = int(minutes)\n hours = int(hours)\n if minutes == 1:\n minorminutes = 'minute'\n else:\n minorminutes = 'minutes'\n if hours == 1:\n hour_or_hours = 'hour'\n else:\n hour_or_hours = 'hours'\n if hours == 0:\n time_til_rise = '{} {}'.format(minutes, minorminutes)\n else:\n time_til_rise = '{} {} and {} {}'.format(hours, hour_or_hours,\n minutes, minorminutes)\n else:\n answer = 'failure'\n return statement('the next flyover for {} will begin in {} on {}'.\n format(location_name, time_til_rise, answer))\n\n\[email protected]('WhereISS')\ndef share_location():\n (iss_location, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name) = where_is_the_iss_now()\n latitude, longitude, distance_miles = float(latitude), float(longitude\n ), float(distance_miles)\n return statement(iss_location).standard_card(title=\n 'Location of the International Space Station', text=\n \"\"\"Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}\"\"\".\n format(round(latitude, 2), round(longitude, 2), round(\n distance_miles, 0), ordinal, name, admin1, country_name))\n\n\[email protected]('AMAZON.FallbackIntent')\ndef fallback():\n to_continue = render_template('to_continue')\n return question('Sorry, I am not sure what you asked me...{}'.format(\n to_continue))\n\n\[email protected]('AMAZON.NavigateHomeIntent')\ndef go_home():\n return question('et - phone home')\n\n\[email protected]('AMAZON.HelpIntent')\ndef help_me():\n help_me_text = render_template('help')\n return question(help_me_text)\n\n\n<mask token>\n\n\[email protected]('AMAZON.StopIntent')\ndef stop():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]('AMAZON.CancelIntent')\ndef cancel():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]_ended\ndef session_ended():\n return '{}', 200\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef where_is_the_iss_now():\n iss_now_website = 
'http://api.open-notify.org/iss-now.json'\n webby = requests.get(iss_now_website)\n data = webby.json()\n if data['iss_position']:\n longitude = data['iss_position'].get('longitude')\n latitude = data['iss_position'].get('latitude')\n results = rg.search((latitude, longitude), mode=1)\n lat, lon, name, admin1, admin2, cc = results[0].values()\n ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(\n latitude), float(longitude)))\n country_cc = requests.get(\n 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json'\n )\n country_cc = country_cc.json()\n iss_coordinates = latitude, longitude\n k_nearest_coordinates = lat, lon\n distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates\n ).miles\n country_name = ''\n for i in filter(lambda d: d.get('Code') == cc, country_cc):\n country_name = i.get('Name')\n location_text = ', '.join([name, admin1, country_name])\n if distance_miles > 150:\n answer = (\n 'The International Space Station is {} miles {} off the coast of {}'\n .format(int(distance_miles), ordinal, location_text))\n else:\n answer = ('the International Space Station is {} miles {} near {}'.\n format(int(distance_miles), ordinal, location_text))\n return (answer, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name)\n\n\[email protected]('/')\ndef homepage():\n return ''\n\n\[email protected]\ndef start_skill():\n welcome_message_reprompt = render_template('welcome_message_reprompt')\n welcome_message = render_template('welcome_message')\n return question(welcome_message).reprompt(welcome_message_reprompt)\n\n\[email protected]('YourLocation')\ndef pass_over(my_location):\n geolocator = Nominatim(user_agent='my-application')\n print(my_location)\n location = geolocator.geocode(my_location, language='en-US')\n try:\n city = location.address.split(',')[0]\n state = location.address.split(',')[2]\n country = location.address.split(',')[-1]\n location_name = ', '.join([city, state, country])\n except IndexError:\n location_name = location.address.split(',')[-1]\n fly_over = requests.get(\n 'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(\n location.latitude, location.longitude))\n fly_over = fly_over.json()\n if fly_over['message'] == 'success':\n rise = fly_over['response'][0]\n answer = time.strftime('%A, %B %d, %Y at %I:%M %p GMT', time.\n localtime(rise.get('risetime')))\n a = rise.get('risetime')\n b = time.time()\n c = a - b\n hours = c // 3600 % 24\n minutes = c // 60 % 60\n minutes = int(minutes)\n hours = int(hours)\n if minutes == 1:\n minorminutes = 'minute'\n else:\n minorminutes = 'minutes'\n if hours == 1:\n hour_or_hours = 'hour'\n else:\n hour_or_hours = 'hours'\n if hours == 0:\n time_til_rise = '{} {}'.format(minutes, minorminutes)\n else:\n time_til_rise = '{} {} and {} {}'.format(hours, hour_or_hours,\n minutes, minorminutes)\n else:\n answer = 'failure'\n return statement('the next flyover for {} will begin in {} on {}'.\n format(location_name, time_til_rise, answer))\n\n\[email protected]('WhereISS')\ndef share_location():\n (iss_location, latitude, longitude, distance_miles, ordinal, name,\n admin1, country_name) = where_is_the_iss_now()\n latitude, longitude, distance_miles = float(latitude), float(longitude\n ), float(distance_miles)\n return statement(iss_location).standard_card(title=\n 'Location of the International Space Station', text=\n \"\"\"Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}\"\"\".\n format(round(latitude, 
2), round(longitude, 2), round(\n distance_miles, 0), ordinal, name, admin1, country_name))\n\n\[email protected]('AMAZON.FallbackIntent')\ndef fallback():\n to_continue = render_template('to_continue')\n return question('Sorry, I am not sure what you asked me...{}'.format(\n to_continue))\n\n\[email protected]('AMAZON.NavigateHomeIntent')\ndef go_home():\n return question('et - phone home')\n\n\[email protected]('AMAZON.HelpIntent')\ndef help_me():\n help_me_text = render_template('help')\n return question(help_me_text)\n\n\[email protected]('Credits')\ndef speak_credits():\n credits_ = render_template('credits')\n return statement(credits_)\n\n\[email protected]('AMAZON.StopIntent')\ndef stop():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]('AMAZON.CancelIntent')\ndef cancel():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]_ended\ndef session_ended():\n return '{}', 200\n\n\n<mask token>\n", "step-5": "\nfrom flask import Flask, render_template\nfrom flask_ask import Ask, statement, question, session\nimport reverse_geocoder as rg\nfrom geopy import distance\nfrom geopy.geocoders import Nominatim\nimport requests\nimport time\n\n\n'''\n :::::::: ::::::::: ::: :::::::: :::::::::: ::: ::: ::: ::: ::: ::: ::: \n:+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: \n+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ \n+#++:++#++ +#++:++#+ +#++:++#++: +#+ +#++:++# +#+ +:+ +#+ +#++:++#++: +#+ +#++:++ +#+ \n +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ \n#+# #+# #+# #+# #+# #+# #+# #+# #+#+# #+#+# #+# #+# #+# #+# #+# \n ######## ### ### ### ######## ########## ### ### ### ### ########## ### ### ### \n'''\n\n\napp = Flask(__name__)\n\nask = Ask(app, \"/space_walk\")\n\n\ndef find_ordinals(city, iss):\n ''' \n Take tuple coordinates (lat, lon) for City and ISS and\n find the cardinal direction of NE, SE, SW, NW\n '''\n\n if iss[0] - city[0] > 0:\n a = 'North'\n else:\n a = 'South'\n\n if iss[1] - city[1] > 0:\n b = 'East'\n else:\n b = 'West'\n return ''.join([a, b])\n\n\ndef where_is_the_iss_now():\n iss_now_website = 'http://api.open-notify.org/iss-now.json'\n webby = requests.get(iss_now_website)\n data = webby.json()\n\n if data['iss_position']:\n longitude = data['iss_position'].get('longitude')\n latitude = data['iss_position'].get('latitude')\n\n results = rg.search((latitude, longitude), mode=1)\n\n lat, lon, name, admin1, admin2, cc = results[0].values()\n\n ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(latitude), float(longitude)))\n\n country_cc = requests.get(\n 'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json')\n country_cc = country_cc.json()\n\n iss_coordinates = (latitude, longitude)\n k_nearest_coordinates = (lat, lon)\n distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates).miles\n\n country_name = ''\n for i in filter(lambda d: d.get('Code') == cc, country_cc):\n country_name = i.get('Name')\n\n location_text = ', '.join([name, admin1, country_name])\n\n if distance_miles > 150:\n answer = 'The International Space Station is {} miles {} off the coast of {}'.format(int(distance_miles), ordinal,\n location_text)\n else:\n answer = 'the International Space Station is {} miles {} near {}'.format(int(distance_miles),ordinal, location_text)\n return answer, latitude, longitude, distance_miles, ordinal, name, admin1, country_name\n\n\[email protected]('/')\ndef 
homepage():\n return ''\n\n\[email protected]\ndef start_skill():\n # welcome_message = 'Welcome to the Fleet Feet Journal! What is your name?'\n\n welcome_message_reprompt = render_template('welcome_message_reprompt')\n welcome_message = render_template('welcome_message')\n return (question(welcome_message).reprompt(welcome_message_reprompt))\n\n\[email protected]('YourLocation')\ndef pass_over(my_location):\n\n geolocator = Nominatim(user_agent='my-application')\n print(my_location)\n location = geolocator.geocode(my_location,language='en-US')\n try:\n city = location.address.split(',')[0]\n state = location.address.split(',')[2]\n country = location.address.split(',')[-1]\n location_name = ', '.join([city, state, country])\n except IndexError:\n location_name = location.address.split(',')[-1]\n\n fly_over = requests.get(\n 'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(location.latitude, location.longitude))\n fly_over = fly_over.json()\n\n if fly_over['message'] == 'success':\n rise = fly_over['response'][0]\n answer = time.strftime('%A, %B %d, %Y at %I:%M %p GMT', time.localtime(rise.get('risetime')))\n a = rise.get('risetime') # last epoch recorded\n b = time.time() # current epoch time\n c = a - b # returns seconds\n hours = c // 3600 % 24\n minutes = c // 60 % 60\n minutes = int(minutes)\n hours = int(hours)\n\n if minutes == 1:\n minorminutes = 'minute'\n else: minorminutes = 'minutes'\n\n if hours == 1:\n hour_or_hours = 'hour'\n else: hour_or_hours = 'hours'\n\n if hours == 0:\n time_til_rise = \"{} {}\".format(minutes, minorminutes)\n else: time_til_rise = \"{} {} and {} {}\".format(hours, hour_or_hours, minutes, minorminutes)\n\n else:\n answer = \"failure\"\n return statement('the next flyover for {} will begin in {} on {}'.format(location_name, time_til_rise, answer))\n\n\[email protected]('WhereISS')\ndef share_location():\n\n iss_location, latitude, longitude, distance_miles, ordinal, name, admin1, country_name= where_is_the_iss_now()\n latitude, longitude, distance_miles = float(latitude), float(longitude), float(distance_miles)\n return statement(iss_location).standard_card(\n title=\"Location of the International Space Station\",\n text='Latitude {} and Longitude {},\\n {} miles {} of {}, {} in {}'.format(round(latitude,2), round(longitude,2), round(distance_miles,0), ordinal, name, admin1, country_name))\n\n\[email protected]('AMAZON.FallbackIntent')\ndef fallback():\n to_continue = render_template('to_continue')\n return question('Sorry, I am not sure what you asked me...{}'.format(to_continue))\n\n\[email protected]('AMAZON.NavigateHomeIntent')\ndef go_home():\n return question('et - phone home')\n\n\[email protected]('AMAZON.HelpIntent')\ndef help_me():\n help_me_text = render_template('help')\n return question(help_me_text)\n\n\[email protected]('Credits')\ndef speak_credits():\n credits_ = render_template('credits')\n return statement(credits_)\n\n\[email protected]('AMAZON.StopIntent')\ndef stop():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]('AMAZON.CancelIntent')\ndef cancel():\n bye_text = render_template('bye')\n return statement(bye_text)\n\n\[email protected]_ended\ndef session_ended():\n return \"{}\", 200\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\n\n\n", "step-ids": [ 8, 9, 11, 12, 17 ] }
[ 8, 9, 11, 12, 17 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> screen.setup(width=600, height=600) screen.bgcolor('black') screen.title('Snake Game') screen.tracer(0) <|reserved_special_token_0|> screen.listen() screen.onkey(snake.up, 'Up') screen.onkey(snake.down, 'Down') screen.onkey(snake.left, 'Left') screen.onkey(snake.right, 'Right') <|reserved_special_token_0|> while game_is_on: screen.update() time.sleep(0.1) snake.move() if snake.segment[0].distance(food) < 15: food.refresh() snake.extend() score.increase_score() if snake.segment[0].xcor() > 280 or snake.segment[0].xcor( ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor( ) < -280: game_is_on = False score.gameover() for seg in snake.segment: if seg == snake.segment[0]: continue if snake.segment[0].distance(seg) < 10: game_is_on = False score.gameover() screen.exitonclick() <|reserved_special_token_1|> <|reserved_special_token_0|> screen = Screen() screen.setup(width=600, height=600) screen.bgcolor('black') screen.title('Snake Game') screen.tracer(0) snake = Snake() food = Food() score = Scoreboard() screen.listen() screen.onkey(snake.up, 'Up') screen.onkey(snake.down, 'Down') screen.onkey(snake.left, 'Left') screen.onkey(snake.right, 'Right') game_is_on = True while game_is_on: screen.update() time.sleep(0.1) snake.move() if snake.segment[0].distance(food) < 15: food.refresh() snake.extend() score.increase_score() if snake.segment[0].xcor() > 280 or snake.segment[0].xcor( ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor( ) < -280: game_is_on = False score.gameover() for seg in snake.segment: if seg == snake.segment[0]: continue if snake.segment[0].distance(seg) < 10: game_is_on = False score.gameover() screen.exitonclick() <|reserved_special_token_1|> from turtle import Screen import time from snake import Snake from snake_food import Food from snake_score import Scoreboard screen = Screen() screen.setup(width=600, height=600) screen.bgcolor('black') screen.title('Snake Game') screen.tracer(0) snake = Snake() food = Food() score = Scoreboard() screen.listen() screen.onkey(snake.up, 'Up') screen.onkey(snake.down, 'Down') screen.onkey(snake.left, 'Left') screen.onkey(snake.right, 'Right') game_is_on = True while game_is_on: screen.update() time.sleep(0.1) snake.move() if snake.segment[0].distance(food) < 15: food.refresh() snake.extend() score.increase_score() if snake.segment[0].xcor() > 280 or snake.segment[0].xcor( ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor( ) < -280: game_is_on = False score.gameover() for seg in snake.segment: if seg == snake.segment[0]: continue if snake.segment[0].distance(seg) < 10: game_is_on = False score.gameover() screen.exitonclick() <|reserved_special_token_1|> from turtle import Screen import time from snake import Snake from snake_food import Food from snake_score import Scoreboard screen = Screen() screen.setup(width=600,height=600) screen.bgcolor("black") screen.title("Snake Game") screen.tracer(0) snake = Snake() food=Food() score=Scoreboard() screen.listen() screen.onkey(snake.up,"Up") screen.onkey(snake.down,"Down") screen.onkey(snake.left,"Left") screen.onkey(snake.right,"Right") game_is_on = True while game_is_on: screen.update() time.sleep(0.1) snake.move() if (snake.segment[0].distance(food))<15: food.refresh() # screen.update() snake.extend() score.increase_score() if snake.segment[0].xcor() > 280 or snake.segment[0].xcor() < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor() < -280: 
game_is_on=False score.gameover() for seg in snake.segment: if (seg==snake.segment[0]): continue if snake.segment[0].distance(seg)<10: game_is_on = False score.gameover() screen.exitonclick()
flexible
{ "blob_id": "cfc0ca0d8528937526f6c42721870f1739a2ae95", "index": 5467, "step-1": "<mask token>\n", "step-2": "<mask token>\nscreen.setup(width=600, height=600)\nscreen.bgcolor('black')\nscreen.title('Snake Game')\nscreen.tracer(0)\n<mask token>\nscreen.listen()\nscreen.onkey(snake.up, 'Up')\nscreen.onkey(snake.down, 'Down')\nscreen.onkey(snake.left, 'Left')\nscreen.onkey(snake.right, 'Right')\n<mask token>\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n snake.move()\n if snake.segment[0].distance(food) < 15:\n food.refresh()\n snake.extend()\n score.increase_score()\n if snake.segment[0].xcor() > 280 or snake.segment[0].xcor(\n ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor(\n ) < -280:\n game_is_on = False\n score.gameover()\n for seg in snake.segment:\n if seg == snake.segment[0]:\n continue\n if snake.segment[0].distance(seg) < 10:\n game_is_on = False\n score.gameover()\nscreen.exitonclick()\n", "step-3": "<mask token>\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor('black')\nscreen.title('Snake Game')\nscreen.tracer(0)\nsnake = Snake()\nfood = Food()\nscore = Scoreboard()\nscreen.listen()\nscreen.onkey(snake.up, 'Up')\nscreen.onkey(snake.down, 'Down')\nscreen.onkey(snake.left, 'Left')\nscreen.onkey(snake.right, 'Right')\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n snake.move()\n if snake.segment[0].distance(food) < 15:\n food.refresh()\n snake.extend()\n score.increase_score()\n if snake.segment[0].xcor() > 280 or snake.segment[0].xcor(\n ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor(\n ) < -280:\n game_is_on = False\n score.gameover()\n for seg in snake.segment:\n if seg == snake.segment[0]:\n continue\n if snake.segment[0].distance(seg) < 10:\n game_is_on = False\n score.gameover()\nscreen.exitonclick()\n", "step-4": "from turtle import Screen\nimport time\nfrom snake import Snake\nfrom snake_food import Food\nfrom snake_score import Scoreboard\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor('black')\nscreen.title('Snake Game')\nscreen.tracer(0)\nsnake = Snake()\nfood = Food()\nscore = Scoreboard()\nscreen.listen()\nscreen.onkey(snake.up, 'Up')\nscreen.onkey(snake.down, 'Down')\nscreen.onkey(snake.left, 'Left')\nscreen.onkey(snake.right, 'Right')\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n snake.move()\n if snake.segment[0].distance(food) < 15:\n food.refresh()\n snake.extend()\n score.increase_score()\n if snake.segment[0].xcor() > 280 or snake.segment[0].xcor(\n ) < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor(\n ) < -280:\n game_is_on = False\n score.gameover()\n for seg in snake.segment:\n if seg == snake.segment[0]:\n continue\n if snake.segment[0].distance(seg) < 10:\n game_is_on = False\n score.gameover()\nscreen.exitonclick()\n", "step-5": "from turtle import Screen\r\nimport time\r\nfrom snake import Snake\r\nfrom snake_food import Food\r\nfrom snake_score import Scoreboard\r\n\r\nscreen = Screen()\r\nscreen.setup(width=600,height=600)\r\nscreen.bgcolor(\"black\")\r\nscreen.title(\"Snake Game\")\r\nscreen.tracer(0)\r\n\r\nsnake = Snake()\r\nfood=Food()\r\nscore=Scoreboard()\r\n\r\nscreen.listen()\r\nscreen.onkey(snake.up,\"Up\")\r\nscreen.onkey(snake.down,\"Down\")\r\nscreen.onkey(snake.left,\"Left\")\r\nscreen.onkey(snake.right,\"Right\")\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n screen.update()\r\n time.sleep(0.1)\r\n snake.move()\r\n\r\n if (snake.segment[0].distance(food))<15:\r\n 
food.refresh()\r\n # screen.update()\r\n snake.extend()\r\n score.increase_score()\r\n\r\n if snake.segment[0].xcor() > 280 or snake.segment[0].xcor() < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor() < -280:\r\n game_is_on=False\r\n score.gameover()\r\n\r\n for seg in snake.segment:\r\n if (seg==snake.segment[0]):\r\n continue\r\n if snake.segment[0].distance(seg)<10:\r\n game_is_on = False\r\n score.gameover()\r\n\r\nscreen.exitonclick()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# pyre-ignore-all-errors # Copyright (c) The Diem Core Contributors # SPDX-License-Identifier: Apache-2.0 from wallet.storage import db_session, engine, Base from wallet.storage.models import User, Account from wallet.types import RegistrationStatus from diem_utils.types.currencies import FiatCurrency def clear_db() -> None: Base.metadata.drop_all(bind=engine) Base.metadata.create_all(bind=engine) def setup_fake_data() -> None: clear_db() fake_users = [ User( username="sunmi", registration_status=RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.USD, selected_language="en", password_salt="123", password_hash="deadbeef", is_admin=True, first_name="First1", last_name="Last1", account=Account(), ), User( username="sunyc", registration_status=RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.USD, selected_language="en", password_salt="123", password_hash="deadbeef", is_admin=False, first_name="First2", last_name="Last2", account=Account(), ), User( username="rustie", registration_status=RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.USD, selected_language="en", password_salt="123", password_hash="deadbeef", is_admin=False, first_name="First3", last_name="Last3", account=Account(), ), ] for user in fake_users: db_session.add(user) try: db_session.commit() except Exception as e: db_session.rollback() db_session.flush()
normal
{ "blob_id": "a6bd10723bd89dd08605f7a4abf17ccf9726b3f5", "index": 8937, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef setup_fake_data() ->None:\n clear_db()\n fake_users = [User(username='sunmi', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=True, first_name='First1', last_name='Last1',\n account=Account()), User(username='sunyc', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First2', last_name='Last2',\n account=Account()), User(username='rustie', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First3', last_name='Last3',\n account=Account())]\n for user in fake_users:\n db_session.add(user)\n try:\n db_session.commit()\n except Exception as e:\n db_session.rollback()\n db_session.flush()\n", "step-3": "<mask token>\n\n\ndef clear_db() ->None:\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n\ndef setup_fake_data() ->None:\n clear_db()\n fake_users = [User(username='sunmi', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=True, first_name='First1', last_name='Last1',\n account=Account()), User(username='sunyc', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First2', last_name='Last2',\n account=Account()), User(username='rustie', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First3', last_name='Last3',\n account=Account())]\n for user in fake_users:\n db_session.add(user)\n try:\n db_session.commit()\n except Exception as e:\n db_session.rollback()\n db_session.flush()\n", "step-4": "from wallet.storage import db_session, engine, Base\nfrom wallet.storage.models import User, Account\nfrom wallet.types import RegistrationStatus\nfrom diem_utils.types.currencies import FiatCurrency\n\n\ndef clear_db() ->None:\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n\ndef setup_fake_data() ->None:\n clear_db()\n fake_users = [User(username='sunmi', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=True, first_name='First1', last_name='Last1',\n account=Account()), User(username='sunyc', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First2', last_name='Last2',\n account=Account()), User(username='rustie', registration_status=\n RegistrationStatus.Registered, selected_fiat_currency=FiatCurrency.\n USD, selected_language='en', password_salt='123', password_hash=\n 'deadbeef', is_admin=False, first_name='First3', last_name='Last3',\n account=Account())]\n for user in fake_users:\n 
db_session.add(user)\n try:\n db_session.commit()\n except Exception as e:\n db_session.rollback()\n db_session.flush()\n", "step-5": "# pyre-ignore-all-errors\n\n# Copyright (c) The Diem Core Contributors\n# SPDX-License-Identifier: Apache-2.0\n\nfrom wallet.storage import db_session, engine, Base\nfrom wallet.storage.models import User, Account\nfrom wallet.types import RegistrationStatus\nfrom diem_utils.types.currencies import FiatCurrency\n\n\ndef clear_db() -> None:\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n\ndef setup_fake_data() -> None:\n clear_db()\n\n fake_users = [\n User(\n username=\"sunmi\",\n registration_status=RegistrationStatus.Registered,\n selected_fiat_currency=FiatCurrency.USD,\n selected_language=\"en\",\n password_salt=\"123\",\n password_hash=\"deadbeef\",\n is_admin=True,\n first_name=\"First1\",\n last_name=\"Last1\",\n account=Account(),\n ),\n User(\n username=\"sunyc\",\n registration_status=RegistrationStatus.Registered,\n selected_fiat_currency=FiatCurrency.USD,\n selected_language=\"en\",\n password_salt=\"123\",\n password_hash=\"deadbeef\",\n is_admin=False,\n first_name=\"First2\",\n last_name=\"Last2\",\n account=Account(),\n ),\n User(\n username=\"rustie\",\n registration_status=RegistrationStatus.Registered,\n selected_fiat_currency=FiatCurrency.USD,\n selected_language=\"en\",\n password_salt=\"123\",\n password_hash=\"deadbeef\",\n is_admin=False,\n first_name=\"First3\",\n last_name=\"Last3\",\n account=Account(),\n ),\n ]\n\n for user in fake_users:\n db_session.add(user)\n\n try:\n db_session.commit()\n except Exception as e:\n db_session.rollback()\n db_session.flush()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def bio_shortener(bio): lines = [] x = len(bio) / 30 y = 0 Status = True while Status: y = y + 1 lines.append(bio[0:30]) lines.append('\n') bio = bio[30:] if y == int(x) + 1: Status = False A = ''.join(lines) return A def nb_checker(nb): if nb != 'None': return nb.text else: nb def quick_search(username): print('Collecting username information...') insta_url = 'https://instagram.com/' + username + '/' chromeBrowser.get(insta_url) WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath ('//*[@id="loginForm"]/div/div[1]/div/label/input')) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password ) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div[1]/div[3]/button').click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button')) chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button').click() try: instaName = chromeBrowser.find_element_by_class_name('rhpdm').text except: instaName = 'None' try: instaBio = chromeBrowser.find_element_by_xpath( '/html/body/div[1]/section/main/div/header/section/div[2]/span' ).text except: instaBio = 'None' try: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]' ).text except NameError: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a' ).text except: instaPersonalSite = 'None' sleep(1) chromeBrowser.get('https://stackoverflow.com/users/') WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input')) chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username) sleep(1) try: Name = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a') if str(Name.text.lower()) == username.lower(): placeholder = True except: placeholder = False try: sofLocation = chromeBrowser.find_element_by_class_name('user-location' ).text except: sofLocation = 'None' try: sofUser_tag = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text except: sofUser_tag = 'None' try: chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a' ).click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. 
find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]' )) except: placeholder = True try: sofBio = chromeBrowser.find_element_by_xpath( '//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text except: sofBio = 'None' githubUrl = 'https://api.github.com/users/' + username try: with urllib.request.urlopen(githubUrl) as url: githubData = json.loads(url.read().decode()) gitName = str(githubData['name']) gitCompany = str(githubData['company']) gitBlog = str(githubData['blog']) gitEmail = str(githubData['email']) gitBio = str(githubData['bio']) gitTwitter = str(githubData['twitter_username']) gitLocation = str(githubData['location']) except: placeholder = True pt = PrettyTable([' ', ' Instagram ', ' StackOverflow ', ' GitHub ']) pt.add_row(['Name', instaName, 'X', gitName]) pt.add_row(['Email', 'X', 'X', gitEmail]) pt.add_row(['Company', 'X', 'X', gitCompany]) pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog]) pt.add_row(['Location', 'X', sofLocation, gitLocation]) pt.add_row(['Twitter', 'X', 'X', gitTwitter]) pt.add_row(['Tags', 'X', sofUser_tag, 'X']) pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)]) print(pt) input() <|reserved_special_token_1|> <|reserved_special_token_0|> chrome_options.add_argument('--headless') chrome_options.add_argument('--incognito') chrome_options.add_experimental_option('excludeSwitches', ['enable-logging']) <|reserved_special_token_0|> def bio_shortener(bio): lines = [] x = len(bio) / 30 y = 0 Status = True while Status: y = y + 1 lines.append(bio[0:30]) lines.append('\n') bio = bio[30:] if y == int(x) + 1: Status = False A = ''.join(lines) return A def nb_checker(nb): if nb != 'None': return nb.text else: nb def quick_search(username): print('Collecting username information...') insta_url = 'https://instagram.com/' + username + '/' chromeBrowser.get(insta_url) WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath ('//*[@id="loginForm"]/div/div[1]/div/label/input')) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password ) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div[1]/div[3]/button').click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button')) chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button').click() try: instaName = chromeBrowser.find_element_by_class_name('rhpdm').text except: instaName = 'None' try: instaBio = chromeBrowser.find_element_by_xpath( '/html/body/div[1]/section/main/div/header/section/div[2]/span' ).text except: instaBio = 'None' try: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]' ).text except NameError: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a' ).text except: instaPersonalSite = 'None' sleep(1) chromeBrowser.get('https://stackoverflow.com/users/') WebDriverWait(chromeBrowser, 10).until(lambda d: d. 
find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input')) chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username) sleep(1) try: Name = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a') if str(Name.text.lower()) == username.lower(): placeholder = True except: placeholder = False try: sofLocation = chromeBrowser.find_element_by_class_name('user-location' ).text except: sofLocation = 'None' try: sofUser_tag = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text except: sofUser_tag = 'None' try: chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a' ).click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]' )) except: placeholder = True try: sofBio = chromeBrowser.find_element_by_xpath( '//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text except: sofBio = 'None' githubUrl = 'https://api.github.com/users/' + username try: with urllib.request.urlopen(githubUrl) as url: githubData = json.loads(url.read().decode()) gitName = str(githubData['name']) gitCompany = str(githubData['company']) gitBlog = str(githubData['blog']) gitEmail = str(githubData['email']) gitBio = str(githubData['bio']) gitTwitter = str(githubData['twitter_username']) gitLocation = str(githubData['location']) except: placeholder = True pt = PrettyTable([' ', ' Instagram ', ' StackOverflow ', ' GitHub ']) pt.add_row(['Name', instaName, 'X', gitName]) pt.add_row(['Email', 'X', 'X', gitEmail]) pt.add_row(['Company', 'X', 'X', gitCompany]) pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog]) pt.add_row(['Location', 'X', sofLocation, gitLocation]) pt.add_row(['Twitter', 'X', 'X', gitTwitter]) pt.add_row(['Tags', 'X', sofUser_tag, 'X']) pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)]) print(pt) input() <|reserved_special_token_1|> <|reserved_special_token_0|> chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--headless') chrome_options.add_argument('--incognito') chrome_options.add_experimental_option('excludeSwitches', ['enable-logging']) chromeBrowser = webdriver.Chrome(chromePath, options=chrome_options) def bio_shortener(bio): lines = [] x = len(bio) / 30 y = 0 Status = True while Status: y = y + 1 lines.append(bio[0:30]) lines.append('\n') bio = bio[30:] if y == int(x) + 1: Status = False A = ''.join(lines) return A def nb_checker(nb): if nb != 'None': return nb.text else: nb def quick_search(username): print('Collecting username information...') insta_url = 'https://instagram.com/' + username + '/' chromeBrowser.get(insta_url) WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath ('//*[@id="loginForm"]/div/div[1]/div/label/input')) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password ) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div[1]/div[3]/button').click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. 
find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button')) chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button').click() try: instaName = chromeBrowser.find_element_by_class_name('rhpdm').text except: instaName = 'None' try: instaBio = chromeBrowser.find_element_by_xpath( '/html/body/div[1]/section/main/div/header/section/div[2]/span' ).text except: instaBio = 'None' try: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]' ).text except NameError: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a' ).text except: instaPersonalSite = 'None' sleep(1) chromeBrowser.get('https://stackoverflow.com/users/') WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input')) chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username) sleep(1) try: Name = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a') if str(Name.text.lower()) == username.lower(): placeholder = True except: placeholder = False try: sofLocation = chromeBrowser.find_element_by_class_name('user-location' ).text except: sofLocation = 'None' try: sofUser_tag = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text except: sofUser_tag = 'None' try: chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a' ).click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]' )) except: placeholder = True try: sofBio = chromeBrowser.find_element_by_xpath( '//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text except: sofBio = 'None' githubUrl = 'https://api.github.com/users/' + username try: with urllib.request.urlopen(githubUrl) as url: githubData = json.loads(url.read().decode()) gitName = str(githubData['name']) gitCompany = str(githubData['company']) gitBlog = str(githubData['blog']) gitEmail = str(githubData['email']) gitBio = str(githubData['bio']) gitTwitter = str(githubData['twitter_username']) gitLocation = str(githubData['location']) except: placeholder = True pt = PrettyTable([' ', ' Instagram ', ' StackOverflow ', ' GitHub ']) pt.add_row(['Name', instaName, 'X', gitName]) pt.add_row(['Email', 'X', 'X', gitEmail]) pt.add_row(['Company', 'X', 'X', gitCompany]) pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog]) pt.add_row(['Location', 'X', sofLocation, gitLocation]) pt.add_row(['Twitter', 'X', 'X', gitTwitter]) pt.add_row(['Tags', 'X', sofUser_tag, 'X']) pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)]) print(pt) input() <|reserved_special_token_1|> from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from prettytable import PrettyTable from time import sleep from customization import * import urllib.request, json chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--headless') chrome_options.add_argument('--incognito') chrome_options.add_experimental_option('excludeSwitches', ['enable-logging']) chromeBrowser = webdriver.Chrome(chromePath, options=chrome_options) def bio_shortener(bio): lines = [] x = len(bio) / 30 y = 0 Status = True while Status: y = y + 1 
lines.append(bio[0:30]) lines.append('\n') bio = bio[30:] if y == int(x) + 1: Status = False A = ''.join(lines) return A def nb_checker(nb): if nb != 'None': return nb.text else: nb def quick_search(username): print('Collecting username information...') insta_url = 'https://instagram.com/' + username + '/' chromeBrowser.get(insta_url) WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath ('//*[@id="loginForm"]/div/div[1]/div/label/input')) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password ) chromeBrowser.find_element_by_xpath( '//*[@id="loginForm"]/div[1]/div[3]/button').click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button')) chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/div/div/div/button').click() try: instaName = chromeBrowser.find_element_by_class_name('rhpdm').text except: instaName = 'None' try: instaBio = chromeBrowser.find_element_by_xpath( '/html/body/div[1]/section/main/div/header/section/div[2]/span' ).text except: instaBio = 'None' try: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]' ).text except NameError: instaPersonalSite = chromeBrowser.find_element_by_xpath( '//*[@id="react-root"]/section/main/div/header/section/div[2]/a' ).text except: instaPersonalSite = 'None' sleep(1) chromeBrowser.get('https://stackoverflow.com/users/') WebDriverWait(chromeBrowser, 10).until(lambda d: d. find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input')) chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username) sleep(1) try: Name = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a') if str(Name.text.lower()) == username.lower(): placeholder = True except: placeholder = False try: sofLocation = chromeBrowser.find_element_by_class_name('user-location' ).text except: sofLocation = 'None' try: sofUser_tag = chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text except: sofUser_tag = 'None' try: chromeBrowser.find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a' ).click() WebDriverWait(chromeBrowser, 10).until(lambda d: d. 
find_element_by_xpath( '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]' )) except: placeholder = True try: sofBio = chromeBrowser.find_element_by_xpath( '//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text except: sofBio = 'None' githubUrl = 'https://api.github.com/users/' + username try: with urllib.request.urlopen(githubUrl) as url: githubData = json.loads(url.read().decode()) gitName = str(githubData['name']) gitCompany = str(githubData['company']) gitBlog = str(githubData['blog']) gitEmail = str(githubData['email']) gitBio = str(githubData['bio']) gitTwitter = str(githubData['twitter_username']) gitLocation = str(githubData['location']) except: placeholder = True pt = PrettyTable([' ', ' Instagram ', ' StackOverflow ', ' GitHub ']) pt.add_row(['Name', instaName, 'X', gitName]) pt.add_row(['Email', 'X', 'X', gitEmail]) pt.add_row(['Company', 'X', 'X', gitCompany]) pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog]) pt.add_row(['Location', 'X', sofLocation, gitLocation]) pt.add_row(['Twitter', 'X', 'X', gitTwitter]) pt.add_row(['Tags', 'X', sofUser_tag, 'X']) pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)]) print(pt) input() <|reserved_special_token_1|> from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from prettytable import PrettyTable from time import sleep from customization import * import urllib.request,json chrome_options=webdriver.ChromeOptions() chrome_options.add_argument("--headless") chrome_options.add_argument("--incognito") chrome_options.add_experimental_option('excludeSwitches', ['enable-logging']) chromeBrowser = webdriver.Chrome(chromePath, options=chrome_options) def bio_shortener(bio): lines=[] x=len(bio)/30 y=0 Status=True while Status: y=y+1 lines.append(bio[0:30]) lines.append("\n") bio=bio[30:] if y==int(x)+1: Status=False A=''.join(lines) return A def nb_checker(nb): if nb!='None': return nb.text else: nb def quick_search(username): print("Collecting username information...") insta_url="https://instagram.com/"+username+"/" chromeBrowser.get(insta_url) WebDriverWait(chromeBrowser,5).until(lambda d: d.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input')) chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email) chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password) chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div[1]/div[3]/button').click() WebDriverWait(chromeBrowser,10).until(lambda d: d.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button')) chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button').click() try: instaName=chromeBrowser.find_element_by_class_name('rhpdm').text except: instaName="None" try: instaBio=chromeBrowser.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[2]/span').text except: instaBio="None" try: instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]').text except NameError: instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a').text except: instaPersonalSite='None' sleep(1) chromeBrowser.get('https://stackoverflow.com/users/') WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input')) 
chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username) sleep(1) try: Name=chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a') if str(Name.text.lower())==username.lower(): placeholder=True except: placeholder=False try: sofLocation=chromeBrowser.find_element_by_class_name('user-location').text except: sofLocation='None' try: sofUser_tag = chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text except: sofUser_tag='None' try: chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a').click() WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]')) except: placeholder=True try: sofBio=chromeBrowser.find_element_by_xpath('//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text except: sofBio='None' githubUrl = "https://api.github.com/users/" + username try: with urllib.request.urlopen(githubUrl) as url: githubData = json.loads(url.read().decode()) gitName=str(githubData['name']) gitCompany=str(githubData['company']) gitBlog=str(githubData['blog']) gitEmail=str(githubData['email']) gitBio=str(githubData['bio']) gitTwitter=str(githubData['twitter_username']) gitLocation=str(githubData['location']) except: placeholder=True pt = PrettyTable( [' ', ' Instagram ', ' StackOverflow ', ' GitHub ']) pt.add_row(["Name", instaName,"X", gitName]) pt.add_row(["Email", "X","X",gitEmail]) pt.add_row(["Company","X","X", gitCompany]) pt.add_row(["Personal Site", instaPersonalSite,"X", gitBlog]) pt.add_row(["Location", "X", sofLocation, gitLocation]) pt.add_row(["Twitter", "X", "X", gitTwitter]) pt.add_row(["Tags", "X", sofUser_tag, "X"]) pt.add_row(["Biography", bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)]) print(pt) input()
flexible
{ "blob_id": "e1c902ef340a0a5538b41a03cc93686e0dd31672", "index": 8788, "step-1": "<mask token>\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n 
gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n", "step-2": "<mask token>\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\n<mask token>\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n 
chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n", "step-3": "<mask token>\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n ('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n 
'//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n print(pt)\n input()\n", "step-4": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom prettytable import PrettyTable\nfrom time import sleep\nfrom customization import *\nimport urllib.request, json\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--incognito')\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\n\n\ndef bio_shortener(bio):\n lines = []\n x = len(bio) / 30\n y = 0\n Status = True\n while Status:\n y = y + 1\n lines.append(bio[0:30])\n lines.append('\\n')\n bio = bio[30:]\n if y == int(x) + 1:\n Status = False\n A = ''.join(lines)\n return A\n\n\ndef nb_checker(nb):\n if nb != 'None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print('Collecting username information...')\n insta_url = 'https://instagram.com/' + username + '/'\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser, 5).until(lambda d: d.find_element_by_xpath\n 
('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password\n )\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName = chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName = 'None'\n try:\n instaBio = chromeBrowser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/div[2]/span'\n ).text\n except:\n instaBio = 'None'\n try:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]'\n ).text\n except NameError:\n instaPersonalSite = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a'\n ).text\n except:\n instaPersonalSite = 'None'\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower()) == username.lower():\n placeholder = True\n except:\n placeholder = False\n try:\n sofLocation = chromeBrowser.find_element_by_class_name('user-location'\n ).text\n except:\n sofLocation = 'None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag = 'None'\n try:\n chromeBrowser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a'\n ).click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.\n find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'\n ))\n except:\n placeholder = True\n try:\n sofBio = chromeBrowser.find_element_by_xpath(\n '//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio = 'None'\n githubUrl = 'https://api.github.com/users/' + username\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName = str(githubData['name'])\n gitCompany = str(githubData['company'])\n gitBlog = str(githubData['blog'])\n gitEmail = str(githubData['email'])\n gitBio = str(githubData['bio'])\n gitTwitter = str(githubData['twitter_username'])\n gitLocation = str(githubData['location'])\n except:\n placeholder = True\n pt = PrettyTable([' ', ' Instagram ',\n ' StackOverflow ', ' GitHub '])\n pt.add_row(['Name', instaName, 'X', gitName])\n pt.add_row(['Email', 'X', 'X', gitEmail])\n pt.add_row(['Company', 'X', 'X', gitCompany])\n pt.add_row(['Personal Site', instaPersonalSite, 'X', gitBlog])\n pt.add_row(['Location', 'X', sofLocation, gitLocation])\n pt.add_row(['Twitter', 'X', 'X', gitTwitter])\n pt.add_row(['Tags', 'X', sofUser_tag, 'X'])\n pt.add_row(['Biography', bio_shortener(instaBio), bio_shortener(sofBio),\n bio_shortener(gitBio)])\n 
print(pt)\n input()\n", "step-5": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom prettytable import PrettyTable\nfrom time import sleep\nfrom customization import *\n\nimport urllib.request,json\nchrome_options=webdriver.ChromeOptions()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--incognito\")\nchrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\nchromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)\ndef bio_shortener(bio):\n lines=[]\n x=len(bio)/30\n y=0\n Status=True\n while Status:\n y=y+1\n lines.append(bio[0:30])\n lines.append(\"\\n\")\n bio=bio[30:]\n if y==int(x)+1:\n Status=False\n\n A=''.join(lines)\n return A\n\ndef nb_checker(nb):\n if nb!='None':\n return nb.text\n else:\n nb\n\n\ndef quick_search(username):\n print(\"Collecting username information...\")\n insta_url=\"https://instagram.com/\"+username+\"/\"\n chromeBrowser.get(insta_url)\n WebDriverWait(chromeBrowser,5).until(lambda d: d.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input'))\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input').send_keys(i_email)\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[2]/div/label/input').send_keys(i_password)\n chromeBrowser.find_element_by_xpath('//*[@id=\"loginForm\"]/div[1]/div[3]/button').click()\n WebDriverWait(chromeBrowser,10).until(lambda d: d.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div/div/button'))\n chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\n try:\n instaName=chromeBrowser.find_element_by_class_name('rhpdm').text\n except:\n instaName=\"None\"\n try:\n instaBio=chromeBrowser.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[2]/span').text\n except:\n instaBio=\"None\"\n try:\n instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a[1]').text\n except NameError:\n instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/div[2]/a').text\n except:\n instaPersonalSite='None'\n\n sleep(1)\n chromeBrowser.get('https://stackoverflow.com/users/')\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))\n chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)\n sleep(1)\n try:\n Name=chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')\n if str(Name.text.lower())==username.lower():\n placeholder=True\n except:\n placeholder=False\n try:\n sofLocation=chromeBrowser.find_element_by_class_name('user-location').text\n except:\n sofLocation='None'\n try:\n sofUser_tag = chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text\n except:\n sofUser_tag='None'\n try:\n chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a').click()\n WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'))\n except:\n placeholder=True\n try:\n sofBio=chromeBrowser.find_element_by_xpath('//*[@id=\"user-card\"]/div/div[2]/div/div[1]/div/div[2]').text\n except:\n sofBio='None'\n\n githubUrl = \"https://api.github.com/users/\" + 
username\n\n try:\n with urllib.request.urlopen(githubUrl) as url:\n githubData = json.loads(url.read().decode())\n gitName=str(githubData['name'])\n gitCompany=str(githubData['company'])\n gitBlog=str(githubData['blog'])\n gitEmail=str(githubData['email'])\n gitBio=str(githubData['bio'])\n gitTwitter=str(githubData['twitter_username'])\n gitLocation=str(githubData['location'])\n except:\n placeholder=True\n\n pt = PrettyTable(\n [' ', ' Instagram ', ' StackOverflow ', ' GitHub '])\n pt.add_row([\"Name\", instaName,\"X\", gitName])\n pt.add_row([\"Email\", \"X\",\"X\",gitEmail])\n pt.add_row([\"Company\",\"X\",\"X\", gitCompany])\n pt.add_row([\"Personal Site\", instaPersonalSite,\"X\", gitBlog])\n pt.add_row([\"Location\", \"X\", sofLocation, gitLocation])\n pt.add_row([\"Twitter\", \"X\", \"X\", gitTwitter])\n pt.add_row([\"Tags\", \"X\", sofUser_tag, \"X\"])\n pt.add_row([\"Biography\", bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)])\n print(pt)\n input()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
class Solution(object):

    def maxDistToClosest(self, seats):
        """
        :type seats: List[int]
        :rtype: int
        """
        start = 0
        end = 0
        length = len(seats)
        max_distance = 0
        for i in range(len(seats)):
            seat = seats[i]
            if seat == 1:
                if start == 0 or end == length - 1:
                    max_distance = max(max_distance, end - start + 1)
                else:
                    max_distance = max(max_distance, (end - start + 1) / 2 + (end - start + 1) % 2)
                if i + 1 < length:
                    start = end = i + 1
            else:
                end = i
        if start == 0 or end == length - 1:
            max_distance = max(max_distance, end - start + 1)
        else:
            max_distance = max(max_distance, (end - start + 1) / 2 + (end - start + 1) % 2)
        return max_distance
normal
{ "blob_id": "2b8b502381e35ef8e56bc150114a8a4831782c5a", "index": 3819, "step-1": "<mask token>\n", "step-2": "class Solution(object):\n <mask token>\n", "step-3": "class Solution(object):\n\n def maxDistToClosest(self, seats):\n \"\"\"\n :type seats: List[int]\n :rtype: int\n \"\"\"\n start = 0\n end = 0\n length = len(seats)\n max_distance = 0\n for i in range(len(seats)):\n seat = seats[i]\n if seat == 1:\n if start == 0 or end == length - 1:\n max_distance = max(max_distance, end - start + 1)\n else:\n max_distance = max(max_distance, (end - start + 1) / 2 +\n (end - start + 1) % 2)\n if i + 1 < length:\n start = end = i + 1\n else:\n end = i\n if start == 0 or end == length - 1:\n max_distance = max(max_distance, end - start + 1)\n else:\n max_distance = max(max_distance, (end - start + 1) / 2 + (end -\n start + 1) % 2)\n return max_distance\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import numpy as np

a = np.array([1, 2, 3])
b = np.r_[np.repeat(a, 3), np.tile(a, 3)]
print(b)
normal
{ "blob_id": "f39945f35b13c0918c3ef06224bca65ae6166ebc", "index": 5892, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(b)\n", "step-3": "<mask token>\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n", "step-4": "import numpy as np\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os
import json
import random

chapter_mode = True
setname = 'test_other'
use_chapter = '_chapter'
minlen = 1000
maxlen = 1000
context = '_1000'

info_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)
book_ID_mapping = {}
with open('speaker_book.txt') as fin:
    for line in fin:
        elems = line.split('|')
        ID = elems[0].lstrip().strip()
        speaker = elems[1].lstrip().strip()
        subset = elems[3].lstrip().strip()
        book = elems[5].lstrip().strip()
        if (speaker, book) not in book_ID_mapping:
            book_ID_mapping[(speaker, book)] = [ID]
        else:
            book_ID_mapping[(speaker, book)].append(ID)

with open(info_json) as fin:
    spk_bookwords = json.load(fin)

worddict = set()
with open('../all_rare_words.txt') as fin:
    for line in fin:
        word = line.strip()
        worddict.add(word)

worddict_full = {}
with open('word_freq.txt') as fin:
    for line in fin:
        word, freq = line.split()
        worddict_full[word] = int(freq)

spk_book_KB = {}

KBfulllist = set()

for speaker, books in spk_bookwords.items():
    # spk_book_KB[speaker] = {}
    for book, content in books.items():
        speaker_book_IDs = book_ID_mapping[(speaker, book)] if 'chapter' not in info_json else [speaker]
        for speaker_book_ID in speaker_book_IDs:
            spk_book_KB[speaker_book_ID] = []
            bookwords = content['bookwords']
            oovwords = content['oovwords']
            for word in bookwords:
                if word in worddict:
                    spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))
                    if word not in KBfulllist:
                        KBfulllist.add(word)
            for word in oovwords:
                if word in worddict:
                    spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))
                    if word not in KBfulllist:
                        KBfulllist.add(word)

full_wordlist = list(KBfulllist)
output_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)
os.system('mkdir -p {}'.format(output_path))
worddict = list(worddict)
for ID, KB in spk_book_KB.items():
    random.shuffle(worddict)
    count = 0
    while len(KB) < minlen and count < len(worddict):
        word = worddict[count]
        freq = worddict_full[word] if word in worddict_full else 0
        if (word, freq) not in KB:
            KB.append((word, freq))
        count += 1
    KB.sort(key=lambda tup: tup[1])
    with open(os.path.join(output_path, ID), 'w') as fout:
        for word, freq in KB[:maxlen]:
            fout.write(word+'\n')
normal
{ "blob_id": "3b41bd59c133bb04dae3aa48dc0699388d5bf3d4", "index": 8346, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\n<mask token>\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\n<mask token>\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\n<mask token>\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n<mask token>\nos.system('mkdir -p {}'.format(output_path))\n<mask token>\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n", "step-3": "<mask token>\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\nspk_book_KB = {}\nKBfulllist = set()\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n 
KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n", "step-4": "import os\nimport json\nimport random\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\nspk_book_KB = {}\nKBfulllist = set()\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n", "step-5": "import os\nimport json\nimport random\n\n\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\n\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = 
elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[(speaker, book)] = [ID]\n else:\n book_ID_mapping[(speaker, book)].append(ID)\n\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\n\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\n\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\n\nspk_book_KB = {}\n\nKBfulllist = set()\n\nfor speaker, books in spk_bookwords.items():\n # spk_book_KB[speaker] = {}\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[(speaker, book)] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0)) \n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word+'\\n')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python
# coding: utf-8

from os.path import dirname, abspath

PICKITEMSP = True
RAREP = True
REPAIRP = False

ITEMS = {
    "legendary": ["#02CE01",  # set
                  "#BF642F"],  # legndary
    "rare": ["#BBBB00"]
}

current_abpath = abspath(dirname(__file__)) + "/"
# With py2exe the dirname is INSTPATH/server/library.zip. So
# current_abpath will be INSTPATH/server/library.zip/
if current_abpath[-12:] == "library.zip/":
    current_abpath = current_abpath[:-12]

imgs_dir = current_abpath + "imgs\\"


def get_item_colors():
    '''
    >>> get_item_colors()
    '''
    result = []
    if not PICKITEMSP: return result

    if RAREP:
        for a in ITEMS:
            result += ITEMS[a]
        return result
    else:
        result = ITEMS["legendary"]
        return result
normal
{ "blob_id": "927b42326ad62f5e484fd7016c42a44b93609f83", "index": 1296, "step-1": "<mask token>\n", "step-2": "<mask token>\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\n<mask token>\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-3": "<mask token>\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-4": "from os.path import dirname, abspath\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-5": "#!/usr/bin/python\r\n# coding: utf-8\r\n\r\nfrom os.path import dirname, abspath\r\n\r\nPICKITEMSP = True\r\nRAREP\t = True\r\nREPAIRP = False\r\n\r\nITEMS = {\r\n \"legendary\": [\"#02CE01\", # set\r\n \"#BF642F\"], # legndary\r\n \"rare\":\t [\"#BBBB00\"]\r\n }\r\n\r\ncurrent_abpath = abspath(dirname(__file__)) + \"/\"\r\n# With py2exe the dirname is INSTPATH/server/library.zip. So\r\n# current_abpath will be INSTPATH/server/library.zip/\r\nif current_abpath[-12:] == \"library.zip/\":\r\n current_abpath = current_abpath[:-12]\r\n\r\nimgs_dir = current_abpath + \"imgs\\\\\"\r\n\r\n\r\ndef get_item_colors():\r\n '''\r\n >>> get_item_colors()\r\n '''\r\n result = []\r\n if not PICKITEMSP: return result\r\n \r\n if RAREP:\r\n for a in ITEMS:\r\n result += ITEMS[a]\r\n return result\r\n else:\r\n result = ITEMS[\"legendary\"]\r\n return result\r\n \r\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from datetime import datetime as dt

YEAR = dt.today().year
BINARY_LOCATION = {'binary_location': 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}
CHROME_DRIVER_PATH = r'C:\Users\pavithra\Downloads\chromedriver_win32\chromedriver.exe'
EXTRACTED_DIR = r'C:\Users\pavithra\Documents\fintuple-automation-projects\BseBhavCopy\dailybhavcopy\dailybhavcopy' \
                r'\csv_files'
ZIP_DIR = r'C:\Users\pavithra\Documents\fintuple-automation-projects\BseBhavCopy\dailybhavcopy\dailybhavcopy\zip_files'
HEADLESS_OPTIONS = {'headless': '--headless',
                    'window_size': '--window-size=1920x1080'}
DOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,
                        'download.prompt_for_download': False}


def enable_download(driver, directory):
    """

    :param driver: Selenium web driver
    :param directory: Directory to store the file

    This function allows the Selenium web driver to store the file in the given directory.
    """
    driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
    params = {'cmd': 'Page.setDownloadBehavior',
              'params': {'behavior': 'allow',
                         'downloadPath': directory}}
    driver.execute("send_command", params)
normal
{ "blob_id": "95422348c8db9753830cc0a7c8785c05b44886b1", "index": 842, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef enable_download(driver, directory):\n \"\"\"\n\n :param driver: Selenium web driver\n :param directory: Directory to store the file\n\n This function allows the Selenium web driver to store the file in the given directory.\n \"\"\"\n driver.command_executor._commands['send_command'\n ] = 'POST', '/session/$sessionId/chromium/send_command'\n params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':\n 'allow', 'downloadPath': directory}}\n driver.execute('send_command', params)\n", "step-3": "<mask token>\nYEAR = dt.today().year\nBINARY_LOCATION = {'binary_location':\n 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}\nCHROME_DRIVER_PATH = (\n 'C:\\\\Users\\\\pavithra\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\nEXTRACTED_DIR = (\n 'C:\\\\Users\\\\pavithra\\\\Documents\\\\fintuple-automation-projects\\\\BseBhavCopy\\\\dailybhavcopy\\\\dailybhavcopy\\\\csv_files'\n )\nZIP_DIR = (\n 'C:\\\\Users\\\\pavithra\\\\Documents\\\\fintuple-automation-projects\\\\BseBhavCopy\\\\dailybhavcopy\\\\dailybhavcopy\\\\zip_files'\n )\nHEADLESS_OPTIONS = {'headless': '--headless', 'window_size':\n '--window-size=1920x1080'}\nDOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,\n 'download.prompt_for_download': False}\n\n\ndef enable_download(driver, directory):\n \"\"\"\n\n :param driver: Selenium web driver\n :param directory: Directory to store the file\n\n This function allows the Selenium web driver to store the file in the given directory.\n \"\"\"\n driver.command_executor._commands['send_command'\n ] = 'POST', '/session/$sessionId/chromium/send_command'\n params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':\n 'allow', 'downloadPath': directory}}\n driver.execute('send_command', params)\n", "step-4": "from datetime import datetime as dt\nYEAR = dt.today().year\nBINARY_LOCATION = {'binary_location':\n 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}\nCHROME_DRIVER_PATH = (\n 'C:\\\\Users\\\\pavithra\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\nEXTRACTED_DIR = (\n 'C:\\\\Users\\\\pavithra\\\\Documents\\\\fintuple-automation-projects\\\\BseBhavCopy\\\\dailybhavcopy\\\\dailybhavcopy\\\\csv_files'\n )\nZIP_DIR = (\n 'C:\\\\Users\\\\pavithra\\\\Documents\\\\fintuple-automation-projects\\\\BseBhavCopy\\\\dailybhavcopy\\\\dailybhavcopy\\\\zip_files'\n )\nHEADLESS_OPTIONS = {'headless': '--headless', 'window_size':\n '--window-size=1920x1080'}\nDOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,\n 'download.prompt_for_download': False}\n\n\ndef enable_download(driver, directory):\n \"\"\"\n\n :param driver: Selenium web driver\n :param directory: Directory to store the file\n\n This function allows the Selenium web driver to store the file in the given directory.\n \"\"\"\n driver.command_executor._commands['send_command'\n ] = 'POST', '/session/$sessionId/chromium/send_command'\n params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':\n 'allow', 'downloadPath': directory}}\n driver.execute('send_command', params)\n", "step-5": "from datetime import datetime as dt\n\nYEAR = dt.today().year\nBINARY_LOCATION = {'binary_location': 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}\nCHROME_DRIVER_PATH = r'C:\\Users\\pavithra\\Downloads\\chromedriver_win32\\chromedriver.exe'\nEXTRACTED_DIR = 
r'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy' \\\n r'\\csv_files'\nZIP_DIR = r'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy\\zip_files'\nHEADLESS_OPTIONS = {'headless': '--headless',\n 'window_size': '--window-size=1920x1080'}\nDOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,\n 'download.prompt_for_download': False}\n\n\ndef enable_download(driver, directory):\n \"\"\"\n\n :param driver: Selenium web driver\n :param directory: Directory to store the file\n\n This function allows the Selenium web driver to store the file in the given directory.\n \"\"\"\n driver.command_executor._commands[\"send_command\"] = (\"POST\", '/session/$sessionId/chromium/send_command')\n params = {'cmd': 'Page.setDownloadBehavior',\n 'params': {'behavior': 'allow',\n 'downloadPath': directory}}\n driver.execute(\"send_command\", params)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def results(request): team1damage = 0 team2damage = 0 winner = run(1, 2) team1 = Team.objects.get(pk=1) team2 = Team.objects.get(pk=2) player1 = Player.objects.get(pk=1) player2 = Player.objects.get(pk=2) player3 = Player.objects.get(pk=3) player4 = Player.objects.get(pk=4) player5 = Player.objects.get(pk=5) player6 = Player.objects.get(pk=6) player7 = Player.objects.get(pk=7) player8 = Player.objects.get(pk=8) player9 = Player.objects.get(pk=9) player10 = Player.objects.get(pk=10) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] for i in range(5): team1damage += team1list[i].damage_dealt team2damage += team2list[i].damage_dealt team1damage = round(team1damage, 2) team2damage = round(team2damage, 2) team1hp = round(500.0 - team2damage, 2) if team1hp <= 0.0: team1hp = 0.0 team2hp = round(500.0 - team1damage, 2) if team2hp <= 0.0: team2hp = 0.0 return render(request, 'match/results.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list, 'winner': winner, 'team1damage': team1damage, 'team2damage': team2damage, 'team1hp': team1hp, 'team2hp': team2hp}) <|reserved_special_token_1|> <|reserved_special_token_0|> def startgame(request): match = Match(team1_pk=1, team2_pk=2) team1 = Team.objects.get(pk=match.team1_pk) team2 = Team.objects.get(pk=match.team2_pk) player1 = Player.objects.get(pk=match.team1_pk * 5 - 4) player2 = Player.objects.get(pk=match.team1_pk * 5 - 3) player3 = Player.objects.get(pk=match.team1_pk * 5 - 2) player4 = Player.objects.get(pk=match.team1_pk * 5 - 1) player5 = Player.objects.get(pk=match.team1_pk * 5 - 0) player6 = Player.objects.get(pk=match.team2_pk * 5 - 4) player7 = Player.objects.get(pk=match.team2_pk * 5 - 3) player8 = Player.objects.get(pk=match.team2_pk * 5 - 2) player9 = Player.objects.get(pk=match.team2_pk * 5 - 1) player10 = Player.objects.get(pk=match.team2_pk * 5 - 0) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] return render(request, 'match/startgame.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list}) def results(request): team1damage = 0 team2damage = 0 winner = run(1, 2) team1 = Team.objects.get(pk=1) team2 = Team.objects.get(pk=2) player1 = Player.objects.get(pk=1) player2 = Player.objects.get(pk=2) player3 = Player.objects.get(pk=3) player4 = Player.objects.get(pk=4) player5 = Player.objects.get(pk=5) player6 = Player.objects.get(pk=6) player7 = Player.objects.get(pk=7) player8 = Player.objects.get(pk=8) player9 = Player.objects.get(pk=9) player10 = Player.objects.get(pk=10) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] for i in range(5): team1damage += team1list[i].damage_dealt team2damage += team2list[i].damage_dealt team1damage = round(team1damage, 2) team2damage = round(team2damage, 2) team1hp = round(500.0 - team2damage, 2) if team1hp <= 0.0: team1hp = 0.0 team2hp = round(500.0 - team1damage, 2) if team2hp <= 0.0: team2hp = 0.0 return render(request, 'match/results.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list, 'winner': winner, 'team1damage': team1damage, 'team2damage': team2damage, 'team1hp': team1hp, 'team2hp': team2hp}) <|reserved_special_token_1|> from django.shortcuts import render, redirect from .game import run from .models import Match 
from team.models import Team, Player from django.urls import reverse def startgame(request): match = Match(team1_pk=1, team2_pk=2) team1 = Team.objects.get(pk=match.team1_pk) team2 = Team.objects.get(pk=match.team2_pk) player1 = Player.objects.get(pk=match.team1_pk * 5 - 4) player2 = Player.objects.get(pk=match.team1_pk * 5 - 3) player3 = Player.objects.get(pk=match.team1_pk * 5 - 2) player4 = Player.objects.get(pk=match.team1_pk * 5 - 1) player5 = Player.objects.get(pk=match.team1_pk * 5 - 0) player6 = Player.objects.get(pk=match.team2_pk * 5 - 4) player7 = Player.objects.get(pk=match.team2_pk * 5 - 3) player8 = Player.objects.get(pk=match.team2_pk * 5 - 2) player9 = Player.objects.get(pk=match.team2_pk * 5 - 1) player10 = Player.objects.get(pk=match.team2_pk * 5 - 0) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] return render(request, 'match/startgame.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list}) def results(request): team1damage = 0 team2damage = 0 winner = run(1, 2) team1 = Team.objects.get(pk=1) team2 = Team.objects.get(pk=2) player1 = Player.objects.get(pk=1) player2 = Player.objects.get(pk=2) player3 = Player.objects.get(pk=3) player4 = Player.objects.get(pk=4) player5 = Player.objects.get(pk=5) player6 = Player.objects.get(pk=6) player7 = Player.objects.get(pk=7) player8 = Player.objects.get(pk=8) player9 = Player.objects.get(pk=9) player10 = Player.objects.get(pk=10) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] for i in range(5): team1damage += team1list[i].damage_dealt team2damage += team2list[i].damage_dealt team1damage = round(team1damage, 2) team2damage = round(team2damage, 2) team1hp = round(500.0 - team2damage, 2) if team1hp <= 0.0: team1hp = 0.0 team2hp = round(500.0 - team1damage, 2) if team2hp <= 0.0: team2hp = 0.0 return render(request, 'match/results.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list, 'winner': winner, 'team1damage': team1damage, 'team2damage': team2damage, 'team1hp': team1hp, 'team2hp': team2hp}) <|reserved_special_token_1|> from django.shortcuts import render, redirect from .game import run from .models import Match from team.models import Team, Player from django.urls import reverse # Create your views here. 
def startgame(request): match = Match(team1_pk = 1, team2_pk = 2) team1 = Team.objects.get(pk = match.team1_pk) team2 = Team.objects.get(pk = match.team2_pk) player1 = Player.objects.get(pk = match.team1_pk * 5 - 4) player2 = Player.objects.get(pk = match.team1_pk * 5 - 3) player3 = Player.objects.get(pk = match.team1_pk * 5 - 2) player4 = Player.objects.get(pk = match.team1_pk * 5 - 1) player5 = Player.objects.get(pk = match.team1_pk * 5 - 0) player6 = Player.objects.get(pk = match.team2_pk * 5 - 4) player7 = Player.objects.get(pk = match.team2_pk * 5 - 3) player8 = Player.objects.get(pk = match.team2_pk * 5 - 2) player9 = Player.objects.get(pk = match.team2_pk * 5 - 1) player10 = Player.objects.get(pk = match.team2_pk * 5 - 0) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] return render(request, 'match/startgame.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list}) def results(request): team1damage = 0 team2damage = 0 winner = run(1, 2) team1 = Team.objects.get(pk = 1) team2 = Team.objects.get(pk = 2) player1 = Player.objects.get(pk = 1) player2 = Player.objects.get(pk = 2) player3 = Player.objects.get(pk = 3) player4 = Player.objects.get(pk = 4) player5 = Player.objects.get(pk = 5) player6 = Player.objects.get(pk = 6) player7 = Player.objects.get(pk = 7) player8 = Player.objects.get(pk = 8) player9 = Player.objects.get(pk = 9) player10 = Player.objects.get(pk = 10) team1list = [player1, player2, player3, player4, player5] team2list = [player6, player7, player8, player9, player10] for i in range(5): team1damage += team1list[i].damage_dealt team2damage += team2list[i].damage_dealt team1damage = round(team1damage, 2) team2damage = round(team2damage, 2) team1hp = round(500.0 - team2damage, 2) if team1hp <= 0.0: team1hp = 0.0 team2hp = round(500.0 - team1damage, 2) if team2hp <= 0.0: team2hp = 0.0 return render(request, 'match/results.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list, 'winner': winner, 'team1damage': team1damage, 'team2damage': team2damage, 'team1hp': team1hp, 'team2hp': team2hp})
flexible
{ "blob_id": "e1829904cea51909b3a1729b9a18d40872e7c13c", "index": 6163, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef results(request):\n team1damage = 0\n team2damage = 0\n winner = run(1, 2)\n team1 = Team.objects.get(pk=1)\n team2 = Team.objects.get(pk=2)\n player1 = Player.objects.get(pk=1)\n player2 = Player.objects.get(pk=2)\n player3 = Player.objects.get(pk=3)\n player4 = Player.objects.get(pk=4)\n player5 = Player.objects.get(pk=5)\n player6 = Player.objects.get(pk=6)\n player7 = Player.objects.get(pk=7)\n player8 = Player.objects.get(pk=8)\n player9 = Player.objects.get(pk=9)\n player10 = Player.objects.get(pk=10)\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n for i in range(5):\n team1damage += team1list[i].damage_dealt\n team2damage += team2list[i].damage_dealt\n team1damage = round(team1damage, 2)\n team2damage = round(team2damage, 2)\n team1hp = round(500.0 - team2damage, 2)\n if team1hp <= 0.0:\n team1hp = 0.0\n team2hp = round(500.0 - team1damage, 2)\n if team2hp <= 0.0:\n team2hp = 0.0\n return render(request, 'match/results.html', {'team1': team1, 'team2':\n team2, 'team1list': team1list, 'team2list': team2list, 'winner':\n winner, 'team1damage': team1damage, 'team2damage': team2damage,\n 'team1hp': team1hp, 'team2hp': team2hp})\n", "step-3": "<mask token>\n\n\ndef startgame(request):\n match = Match(team1_pk=1, team2_pk=2)\n team1 = Team.objects.get(pk=match.team1_pk)\n team2 = Team.objects.get(pk=match.team2_pk)\n player1 = Player.objects.get(pk=match.team1_pk * 5 - 4)\n player2 = Player.objects.get(pk=match.team1_pk * 5 - 3)\n player3 = Player.objects.get(pk=match.team1_pk * 5 - 2)\n player4 = Player.objects.get(pk=match.team1_pk * 5 - 1)\n player5 = Player.objects.get(pk=match.team1_pk * 5 - 0)\n player6 = Player.objects.get(pk=match.team2_pk * 5 - 4)\n player7 = Player.objects.get(pk=match.team2_pk * 5 - 3)\n player8 = Player.objects.get(pk=match.team2_pk * 5 - 2)\n player9 = Player.objects.get(pk=match.team2_pk * 5 - 1)\n player10 = Player.objects.get(pk=match.team2_pk * 5 - 0)\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n return render(request, 'match/startgame.html', {'team1': team1, 'team2':\n team2, 'team1list': team1list, 'team2list': team2list})\n\n\ndef results(request):\n team1damage = 0\n team2damage = 0\n winner = run(1, 2)\n team1 = Team.objects.get(pk=1)\n team2 = Team.objects.get(pk=2)\n player1 = Player.objects.get(pk=1)\n player2 = Player.objects.get(pk=2)\n player3 = Player.objects.get(pk=3)\n player4 = Player.objects.get(pk=4)\n player5 = Player.objects.get(pk=5)\n player6 = Player.objects.get(pk=6)\n player7 = Player.objects.get(pk=7)\n player8 = Player.objects.get(pk=8)\n player9 = Player.objects.get(pk=9)\n player10 = Player.objects.get(pk=10)\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n for i in range(5):\n team1damage += team1list[i].damage_dealt\n team2damage += team2list[i].damage_dealt\n team1damage = round(team1damage, 2)\n team2damage = round(team2damage, 2)\n team1hp = round(500.0 - team2damage, 2)\n if team1hp <= 0.0:\n team1hp = 0.0\n team2hp = round(500.0 - team1damage, 2)\n if team2hp <= 0.0:\n team2hp = 0.0\n return render(request, 'match/results.html', {'team1': team1, 'team2':\n team2, 'team1list': team1list, 'team2list': team2list, 'winner':\n winner, 'team1damage': team1damage, 
'team2damage': team2damage,\n 'team1hp': team1hp, 'team2hp': team2hp})\n", "step-4": "from django.shortcuts import render, redirect\nfrom .game import run\nfrom .models import Match\nfrom team.models import Team, Player\nfrom django.urls import reverse\n\n\ndef startgame(request):\n match = Match(team1_pk=1, team2_pk=2)\n team1 = Team.objects.get(pk=match.team1_pk)\n team2 = Team.objects.get(pk=match.team2_pk)\n player1 = Player.objects.get(pk=match.team1_pk * 5 - 4)\n player2 = Player.objects.get(pk=match.team1_pk * 5 - 3)\n player3 = Player.objects.get(pk=match.team1_pk * 5 - 2)\n player4 = Player.objects.get(pk=match.team1_pk * 5 - 1)\n player5 = Player.objects.get(pk=match.team1_pk * 5 - 0)\n player6 = Player.objects.get(pk=match.team2_pk * 5 - 4)\n player7 = Player.objects.get(pk=match.team2_pk * 5 - 3)\n player8 = Player.objects.get(pk=match.team2_pk * 5 - 2)\n player9 = Player.objects.get(pk=match.team2_pk * 5 - 1)\n player10 = Player.objects.get(pk=match.team2_pk * 5 - 0)\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n return render(request, 'match/startgame.html', {'team1': team1, 'team2':\n team2, 'team1list': team1list, 'team2list': team2list})\n\n\ndef results(request):\n team1damage = 0\n team2damage = 0\n winner = run(1, 2)\n team1 = Team.objects.get(pk=1)\n team2 = Team.objects.get(pk=2)\n player1 = Player.objects.get(pk=1)\n player2 = Player.objects.get(pk=2)\n player3 = Player.objects.get(pk=3)\n player4 = Player.objects.get(pk=4)\n player5 = Player.objects.get(pk=5)\n player6 = Player.objects.get(pk=6)\n player7 = Player.objects.get(pk=7)\n player8 = Player.objects.get(pk=8)\n player9 = Player.objects.get(pk=9)\n player10 = Player.objects.get(pk=10)\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n for i in range(5):\n team1damage += team1list[i].damage_dealt\n team2damage += team2list[i].damage_dealt\n team1damage = round(team1damage, 2)\n team2damage = round(team2damage, 2)\n team1hp = round(500.0 - team2damage, 2)\n if team1hp <= 0.0:\n team1hp = 0.0\n team2hp = round(500.0 - team1damage, 2)\n if team2hp <= 0.0:\n team2hp = 0.0\n return render(request, 'match/results.html', {'team1': team1, 'team2':\n team2, 'team1list': team1list, 'team2list': team2list, 'winner':\n winner, 'team1damage': team1damage, 'team2damage': team2damage,\n 'team1hp': team1hp, 'team2hp': team2hp})\n", "step-5": "from django.shortcuts import render, redirect\nfrom .game import run\nfrom .models import Match\nfrom team.models import Team, Player\nfrom django.urls import reverse\n\n# Create your views here.\n\ndef startgame(request):\n match = Match(team1_pk = 1, team2_pk = 2)\n\n team1 = Team.objects.get(pk = match.team1_pk)\n team2 = Team.objects.get(pk = match.team2_pk)\n\n player1 = Player.objects.get(pk = match.team1_pk * 5 - 4)\n player2 = Player.objects.get(pk = match.team1_pk * 5 - 3)\n player3 = Player.objects.get(pk = match.team1_pk * 5 - 2)\n player4 = Player.objects.get(pk = match.team1_pk * 5 - 1)\n player5 = Player.objects.get(pk = match.team1_pk * 5 - 0)\n player6 = Player.objects.get(pk = match.team2_pk * 5 - 4)\n player7 = Player.objects.get(pk = match.team2_pk * 5 - 3)\n player8 = Player.objects.get(pk = match.team2_pk * 5 - 2)\n player9 = Player.objects.get(pk = match.team2_pk * 5 - 1)\n player10 = Player.objects.get(pk = match.team2_pk * 5 - 0)\n\n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, 
player7, player8, player9, player10]\n \n return render(request, 'match/startgame.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list})\n\ndef results(request):\n team1damage = 0\n team2damage = 0\n\n winner = run(1, 2)\n \n team1 = Team.objects.get(pk = 1)\n team2 = Team.objects.get(pk = 2)\n \n player1 = Player.objects.get(pk = 1)\n player2 = Player.objects.get(pk = 2)\n player3 = Player.objects.get(pk = 3)\n player4 = Player.objects.get(pk = 4)\n player5 = Player.objects.get(pk = 5)\n player6 = Player.objects.get(pk = 6)\n player7 = Player.objects.get(pk = 7)\n player8 = Player.objects.get(pk = 8)\n player9 = Player.objects.get(pk = 9)\n player10 = Player.objects.get(pk = 10)\n \n team1list = [player1, player2, player3, player4, player5]\n team2list = [player6, player7, player8, player9, player10]\n \n for i in range(5):\n team1damage += team1list[i].damage_dealt\n team2damage += team2list[i].damage_dealt\n\n team1damage = round(team1damage, 2)\n team2damage = round(team2damage, 2)\n\n team1hp = round(500.0 - team2damage, 2)\n if team1hp <= 0.0: \n team1hp = 0.0\n\n team2hp = round(500.0 - team1damage, 2)\n if team2hp <= 0.0:\n team2hp = 0.0\n\n return render(request, 'match/results.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list, 'winner': winner, 'team1damage': team1damage, 'team2damage': team2damage, 'team1hp': team1hp, 'team2hp': team2hp})", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append('coin_flipping_src') <|reserved_special_token_0|> plt.style.use('bmh') <|reserved_special_token_0|> plt.plot(x_coords, probablility_results, linewidth=2.5) for _ in range(5): plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords], linewidth=0.75) plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5']) plt.xlabel('Number of Heads') plt.ylabel('Probability') plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips') plt.savefig('plot.png') plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append('coin_flipping_src') <|reserved_special_token_0|> plt.style.use('bmh') x_coords = range(10) probablility_results = [probability(x, 10) for x in x_coords] plt.plot(x_coords, probablility_results, linewidth=2.5) for _ in range(5): plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords], linewidth=0.75) plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5']) plt.xlabel('Number of Heads') plt.ylabel('Probability') plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips') plt.savefig('plot.png') plt.show() <|reserved_special_token_1|> import matplotlib.pyplot as plt import sys sys.path.append('coin_flipping_src') from monte_carlo import monte_carlo from probability import probability plt.style.use('bmh') x_coords = range(10) probablility_results = [probability(x, 10) for x in x_coords] plt.plot(x_coords, probablility_results, linewidth=2.5) for _ in range(5): plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords], linewidth=0.75) plt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5']) plt.xlabel('Number of Heads') plt.ylabel('Probability') plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips') plt.savefig('plot.png') plt.show() <|reserved_special_token_1|> import matplotlib.pyplot as plt import sys sys.path.append('coin_flipping_src') from monte_carlo import monte_carlo from probability import probability plt.style.use('bmh') x_coords = range(10) probablility_results = [probability(x,10) for x in x_coords] plt.plot(x_coords,probablility_results,linewidth = 2.5) # plt.plot([0,1,2,3,4],[0.1, 0.3, 0.5, 0.1, 0.1],linewidth=2.5) for _ in range(5): plt.plot(x_coords,[monte_carlo(x,10,100) for x in x_coords],linewidth = 0.75) # plt.plot([0,1,2,3,4],[0.3, 0.1, 0.4, 0.2, 0.1],linewidth=0.75) # plt.plot([0,1,2,3,4],[0.2, 0.2, 0.3, 0.3, 0.2],linewidth=0.75) plt.legend(['True','MC 1','MC 2','MC 3','MC 4','MC 5']) plt.xlabel('Number of Heads') plt.ylabel('Probability') plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips') plt.savefig('plot.png') plt.show()
flexible
{ "blob_id": "124d7da330aa7c869320e10f4f89cc1c872f85f2", "index": 430, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append('coin_flipping_src')\n<mask token>\nplt.style.use('bmh')\n<mask token>\nplt.plot(x_coords, probablility_results, linewidth=2.5)\nfor _ in range(5):\n plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords],\n linewidth=0.75)\nplt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])\nplt.xlabel('Number of Heads')\nplt.ylabel('Probability')\nplt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')\nplt.savefig('plot.png')\nplt.show()\n", "step-3": "<mask token>\nsys.path.append('coin_flipping_src')\n<mask token>\nplt.style.use('bmh')\nx_coords = range(10)\nprobablility_results = [probability(x, 10) for x in x_coords]\nplt.plot(x_coords, probablility_results, linewidth=2.5)\nfor _ in range(5):\n plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords],\n linewidth=0.75)\nplt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])\nplt.xlabel('Number of Heads')\nplt.ylabel('Probability')\nplt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')\nplt.savefig('plot.png')\nplt.show()\n", "step-4": "import matplotlib.pyplot as plt\nimport sys\nsys.path.append('coin_flipping_src')\nfrom monte_carlo import monte_carlo\nfrom probability import probability\nplt.style.use('bmh')\nx_coords = range(10)\nprobablility_results = [probability(x, 10) for x in x_coords]\nplt.plot(x_coords, probablility_results, linewidth=2.5)\nfor _ in range(5):\n plt.plot(x_coords, [monte_carlo(x, 10, 100) for x in x_coords],\n linewidth=0.75)\nplt.legend(['True', 'MC 1', 'MC 2', 'MC 3', 'MC 4', 'MC 5'])\nplt.xlabel('Number of Heads')\nplt.ylabel('Probability')\nplt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')\nplt.savefig('plot.png')\nplt.show()\n", "step-5": "import matplotlib.pyplot as plt\nimport sys\nsys.path.append('coin_flipping_src')\nfrom monte_carlo import monte_carlo\nfrom probability import probability\nplt.style.use('bmh')\nx_coords = range(10)\nprobablility_results = [probability(x,10) for x in x_coords]\nplt.plot(x_coords,probablility_results,linewidth = 2.5)\n# plt.plot([0,1,2,3,4],[0.1, 0.3, 0.5, 0.1, 0.1],linewidth=2.5)\nfor _ in range(5):\n plt.plot(x_coords,[monte_carlo(x,10,100) for x in x_coords],linewidth = 0.75)\n# plt.plot([0,1,2,3,4],[0.3, 0.1, 0.4, 0.2, 0.1],linewidth=0.75)\n# plt.plot([0,1,2,3,4],[0.2, 0.2, 0.3, 0.3, 0.2],linewidth=0.75)\nplt.legend(['True','MC 1','MC 2','MC 3','MC 4','MC 5'])\nplt.xlabel('Number of Heads')\nplt.ylabel('Probability')\nplt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')\nplt.savefig('plot.png')\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python

'''
** dmcalc **

Estimates the Dispersion Measure (DM) from the data in psrfits file format.

Returns the DM value with its uncertainty and reduced chi-square from tempo2
DM fit.

Dependencies
-------------
PSRCHIVE with python interface: http://psrchive.sourceforge.net/
TEMPO2: https://bitbucket.org/psrsoft/tempo2
SKLEARN: https://scikit-learn.org/stable/install.html

Parameters
----------
file(s)    : Input file(s) in psrfits format.

ephem      : Ephemeris (or parameter) file of the pulsar. This is required to
             update the model. It can be given as a command line argument. If
             it is available in the "PWD/ephemerides" folder, one can use that.
             Giving the file with this option overrides the default one.

model      : Template profile for cross-correlating with the observation to
             obtain DM. It can be given as a command line argument, otherwise
             it will look for a matching one in the "PWD/templates" directory
             and, if found, will use that instead. One can use this option to
             override the default selection.

fscrunch   : int, optional, default: 1. Factor for scrunching the frequency
             channels before passing the data to DM estimation.

b3fscrunch : int, optional, default: 1. Factor for scrunching the BAND3 data
             of uGMRT before passing it to DM estimation.

b5fscrunch : int, optional, default: 1. Factor for scrunching the BAND5 data
             of uGMRT before passing it to DM estimation.

offset     : float, optional, default: 0.670520675. Fix for the jump between
             BAND3 and BAND5 of the uGMRT bands.

writeout   : bool, optional, default: False. Writes out the file corrected
             for DM in a default directory (PWD/PSRJ_{site}_final), using the
             following options to reduce the file.

plot       : bool, optional, default: True. Prints the data analysis plot in
             a PDF file. ToA rejection steps and DM corrected ToAs are shown
             in addition to the DM corrected frequency evolution of the
             profile.

ptoa       : bool, optional, default: False. Prints the outlier-cleaned ToAs
             to a file in the TEMPO2 readable format, so that, if required,
             they can be used for other purposes.

Fscrunch   : bool, optional, default: False. Collapse all frequency channels
             to produce one profile.

Tscrunch   : bool, optional, default: False. Collapse all sub-integrations
             to produce one profile.

tscrunch   : int, optional, default: 1. Factor to scrunch sub-integrations
             for writing out the DM corrected file.

quiet      : bool, optional, default: False. Suppresses all print statements
             except warnings and errors.

Returns
-------
Dispersion Measure with uncertainty.

Examples
--------
# (a) for DM estimation with files in default directories:
# dmcalc.py inputfile.fits
#
# (b) to use different ephemeris and template files:
# dmcalc.py -E ephemeris.par -M model.fits data_file.fits
#
# (c) to write the DM corrected fits file and ToAs:
# ./dmcalc2.py -w -ptoa inputfile.fits
'''

# import modules...
import os
import sys
import numpy as np
import psrchive
import argparse
import time
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec

start = time.time()

parser = argparse.ArgumentParser(description='Code for measuring in-band ' +
                                 'DM for pulsar data in psrfits format.')
parser.add_argument('files', nargs='+', type=str,
                    help='The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str,
                    help='Ephemeris file to update the model. Exits if not ' +
                         'given or is not available in "PWD/ephemerides" ' +
                         'directory')
parser.add_argument('-M', '--model', nargs='+', type=str,
                    help='Model template for ToA generation. Exits if not ' +
                         'given or is not available in "PWD/templates" ' +
                         'directory')
parser.add_argument('-f', '--fscrunch', type=int, default=1,
                    help='Factor to scrunch the number of channels for ' +
                         'doing DM estimation (Def: 1)')
parser.add_argument('-b3f', '--b3fscrunch', type=int, default=1,
                    help='Factor to scrunch the number of channels for ' +
                         'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f', '--b5fscrunch', type=int, default=1,
                    help='Factor to scrunch the number of channels for ' +
                         'band5 GMRT data (Def: 1)')
parser.add_argument('-w', '--writeout', action='store_true',
                    help='Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa', '--print_toas', action='store_true',
                    help='Print the prefit ToAs to file in tempo2 format. ' +
                         'Def: False')
parser.add_argument('-F', '--Fscrunch', action='store_true',
                    help='Fully scrunch the number of channels for the ' +
                         'final output archive (Def: False)')
parser.add_argument('-T', '--Tscrunch', action='store_true',
                    help='Completely time scrunch all the integrations')
parser.add_argument('-t', '--tscrunch', type=int, default=1,
                    help='Factor to scrunch the number of integrations for ' +
                         'the final output archive (Def: None)')
parser.add_argument('-o', '--offset', type=float, default=0.670520675,
                    help='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true',
                    help='Only print warnings')


def main():
    # parses the input arguments
    args = parser.parse_args()

    # checks status of quiet and ptoa
    quiet = False
    if args.quiet:
        quiet = True
    tempo2 = True
    ptoa = False
    if args.print_toas:
        ptoa = True

    if not quiet:
        print("Loading the archive files for DM estimation")

    # loads the psrfits file
    archives = []
    for filename in args.files:
        archives.append(psrchive.Archive_load(filename))
    narch = len(archives)
    if narch >= 1:
        if not quiet:
            print("Appending the archives ..."),
        # append data
        ar = freq_appendData(narch, archives, args.offset,
                             args.b3fscrunch, args.b5fscrunch)
        if not quiet:
            print(" done!")
    else:
        if not quiet:
            print("Only one archive was given, so nothing to frequency-append.")
    # ar is the final archive after performing frequency append
    ar = archives[0]
    del archives

    # extracts relevant information from the archive
    ar_psr = ar.get_source()
    ar_nbins = ar.get_nbin()
    ar_tel = ar.get_telescope()
    mjd_start = ar.get_Integration(0).get_start_time().in_days()
    mjd_end = ar.get_Integration(0).get_end_time().in_days()
    ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.
    length = ar.integration_length()
    ar.update_centre_frequency()
    ar_centfr = ar.get_centre_frequency()
    ar_nchan = ar.get_nchan()
    ar_bw = ar.get_bandwidth()
    ar_chnwdth = ar_bw / ar_nchan
    ffrac = args.fscrunch
    if not quiet:
        print("\nNow preparing for DM estimation\n")
    pwd = os.getcwd()

    # checks for ephemeris file and exit if not given or is not available
    # in the default directory "PWD/ephemerides".
    if args.ephem != None:
        ephemeris = args.ephem
    else:
        ephemeris = "ephemerides/" + ar_psr + ".par"
        if not (os.path.exists(ephemeris)):
            sys.exit(1)
    if not quiet:
        print("\nEphemeris file is:" + ephemeris + '\n')

    # if template is given as input argument load and process them
    model = []
    for filename in args.model:
        model.append(psrchive.Archive_load(filename))
    if args.model != None:
        if len(args.model) == 1:
            model = freq_appendModel(1, model, args.offset,
                                     args.b3fscrunch, args.b5fscrunch)
        if len(args.model) > 1:
            model = freq_appendModel(1, model, args.offset,
                                     args.b3fscrunch, args.b5fscrunch)

    # If the template is not given, looking for a matching template in the
    # templates directory
    if args.model == None:
        if not quiet:
            print("Looking for matching template in templates directory..."),
        import subprocess
        tempdir = "templates/*.sm"
        tempfile = ar_psr + '_tmp.txt'
        a = subprocess.call("psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'"
                            % (tempdir, tempfile), shell=True)
        tempnchan = ""
        t1 = str(ar_nbins)
        if ar_tel == 'gmrt':
            t2 = str(int(ar_bw))
        else:
            t2 = str((ar_bw))
        t3 = ('%.2f' % ar_centfr)
        f = open(tempfile, 'r')
        for line in f:
            line = line.strip()
            columns = line.split()
            t4 = float(columns[5])
            t4 = ('%.2f' % t4)
            if ar_tel == 'gmrt':
                if (columns[1] == ar_psr and columns[2] == t1 and
                        str(int(columns[3])) == t2 and t4 == t3):
                    modeltempl = columns[0]
                    tempnchan = columns[4]
                    if not quiet:
                        print(' done\n')
            else:
                if (columns[1] == ar_psr and columns[2] == t1 and
                        str((columns[3])) == t2 and t4 == t3):
                    modeltempl = columns[0]
                    tempnchan = columns[4]
                    if not quiet:
                        print(' done\n')
        if modeltempl == '' and tempnchan == '':
            print("\n** No matching template found for DM fitting. Exiting. **\n")
            sys.exit(1)
        f.close()
        os.remove(tempfile)
        if not quiet:
            print("Found matching template: " + modeltempl)
        model.append(psrchive.Archive_load(modeltempl))

    if not quiet:
        print("\nEstimating the DM from the observation")
    model.update_centre_frequency()

    # cloning the original file for passing to DMCalc() routine
    arch = ar.clone()

    # Calling the DM estimation routine
    dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch, ar_nchan,
        ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model, ephemeris, pwd,
        ffrac, quiet, tempo2, ptoa, narch)

    # writing out the final DM corrected file, if requested
    if args.writeout:
        # removing the DM and DMEPOCH from the ephemeris file for updation
        infile = open(ephemeris, "r")
        tmpeph = ar_psr + '.eph'
        output = open(tmpeph, "w+")
        for i, line in enumerate(infile):
            if not line.lstrip().startswith('DM'):
                if not line.lstrip().startswith('DMEPOCH'):
                    output.write(line)
        infile.close()
        output.close()

        # updating the ephemeris file with measured DM
        dmline = "DM\t\t\t " + str(dmval) + "\t\t" + str(dmverr)
        dmepochline = "DMEPOCH\t\t " + str(round(ar_mjd, 2))
        if not args.quiet:
            print("Updating the ephemeris with new DM... "),
        f = open(tmpeph, 'a')
        f.write("%s\n %s\n" % (dmline, dmepochline))
        if not args.quiet:
            print(" done!")
        f.close()

        # updating the ephemeris in the archive with the measured DM
        if not quiet:
            print("Correcting the DM of the observed file and writing it out... "),
        os.remove(tmpeph)

        # creating the directory for writing the file
        dirfinal = os.path.join(pwd, ar_psr + "_" + ar_tel + "_final")
        if not os.path.exists(dirfinal):
            os.makedirs(dirfinal)

        # filename with path of the DM corrected file
        outfile = dirfinal + "/" + ar_psr + "_" + str(ar_mjd) + "_" + ar_tel + ".ar"

        # Setting the DMC flag to 1. In other words, doing the DM correction.
        ar.set_dispersion_measure(dmval)
        ar.dedisperse()

        # Performing different scrunching in the archive for writing out
        if not args.Tscrunch:
            ar.tscrunch(args.tscrunch)
        else:
            ar.tscrunch()
        if not args.Fscrunch:
            ar.fscrunch(ffrac)
        else:
            ar.fscrunch()

        # Writing out the DM corrected, time/frequency scrunched file.
        ar.unload(outfile)
        if not args.quiet:
            print(" done!")
        del ar
        if not quiet:
            print("The file is corrected for DM and is written out to\n" + outfile)

    # Printing the results to the file and also in the terminal
    f = open(ar_psr + "_DM_timeseries.txt", 'a')
    f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' % (filename,
            ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err,
            ar_centfr, ar_bw, ar_tel))
    f.close()

    import time
    end = time.time()
    total = end - start
    print('-----------------------------------------------------------------------------')
    print('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
    print('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
          fitchisq, ar_centfr, ar_bw, ar_tel))
    print('-----------------------------------------------------------------------------')
    print("\nThe program took %.1f seconds to finish" % total)

# ------------------------------------------------------------------------------------------- #

''' Main function that performs the DM estimation '''
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
           ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
    # Checks if model file is available.
    if model == None:
        sys.exit(1)
    init_dm = ar.get_dispersion_measure()

    # setting up the ToA estimation routine using the psrchive ArrivalTime()
    if not quiet:
        print("Using the ArrivalTime (pat) with PGS in Tempo2 format")
    arrtim = psrchive.ArrivalTime()
    arrtim.set_shift_estimator('PGS')
    arrtim.set_format('Tempo2')
    arrtim.set_format_flags('IPTA')

    if not quiet:
        print("Loading the template file for processing... "),
    std = model.clone()
    std.pscrunch()
    std.tscrunch()
    std_nchan = std.get_nchan()
    std.dedisperse()
    std.fscrunch(ffrac)
    arrtim.set_standard(std)
    if not quiet:
        print(" done!")

    ar.fscrunch(ffrac)
    ar.pscrunch()
    ar.tscrunch()
    arrtim.set_observation(ar)
    if not quiet:
        print("Finding the ToAs... "),

    # Finding the ToAs and reading it into numpy arrays
    toas = arrtim.get_toas()
    toas_filtered = [x.split()[:5] for x in toas]
    str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)
    freq = np.asarray(str_freq, dtype=np.float64)
    amjd = np.asarray(str_mjd, dtype=np.float64)
    terr = np.asarray(str_toaErr, dtype=np.float64)
    if not quiet:
        print(" done!")
        print("Removing the bad ToAs using Huber Regression... "),

    # removing the ToAs with zero errors
    condition1 = terr < 3 * np.median(terr)
    freqnew = np.extract(condition1, freq)
    amjdnew = np.extract(condition1, amjd)
    terrnew = np.extract(condition1, terr)

    # writing the ToAs to a temporary file for getting the non-phase resolved
    # ToAs using general2
    tempfile = ar_psr + "_tmp.txt"
    f = open(tempfile, "w+")
    head = "FORMAT 1\n"
    f.write('%s' % head)
    for i in range(0, np.size(freqnew)):
        f.write('%s %.12f %.20f %.8f %s\n' % (str_filename[0], freqnew[i],
                amjdnew[i], terrnew[i], str_site[0]))
    f.close()
    tmpstr = "tempo2 -output general2 -f"
    tmp = os.popen(tmpstr + " %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'"
                   % (ephemeris, tempfile)).read()
    os.remove(tempfile)

    # extracting the data from general2 output
    tmp1 = tmp.split('\n')
    freqtmp = np.zeros(np.size(amjdnew))
    toastmp = np.zeros(np.size(amjdnew))
    TErrtmp = np.zeros(np.size(amjdnew))
    for i in range(np.size(amjdnew)):
        _, freqtmp[i], toastmp[i], TErrtmp[i] = (tmp1[i].split())
    TErrtmp /= 1e+6

    # importing libraries for outlier removal
    from sklearn import linear_model
    from sklearn.linear_model import HuberRegressor
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.pipeline import make_pipeline

    # changing the shape of frequency array
    freqarr = freqtmp.reshape(-1, 1)
    # making a nu^2 model and fitting using Huber Regression
    toastmp *= 1e+6
    toashift = (np.min(toastmp) * -1.5)
    toastmp += toashift
    Terrtmp = TErrtmp * 1e+6
    model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
    model.fit(freqarr, toastmp,
              huberregressor__sample_weight=np.ravel(1. / Terrtmp))
    y_pred = model.predict(freqarr)
    residuals = toastmp - y_pred
    median = np.median(residuals)
    MAD = np.median(np.abs(residuals - np.median(residuals))) / 0.6744897501960817

    # filtering the good ToAs
    condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD)
    freqf = np.around(np.extract(condition2, freqarr), 3)
    amjdf = np.extract(condition2, amjdnew)
    toasf = np.extract(condition2, toastmp)
    terrf = np.extract(condition2, TErrtmp)
    prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
    terrf *= 1e+6
    if not quiet:
        print(" done!")

    # writing out the ToAs in proper format
    if ptoa:
        if not quiet:
            print('Writing out ToAs into a file in tempo2 format'),
        dirtoas = os.path.join(pwd, ar_psr + "_" + ar_tel + "_ToAs")
        if not os.path.exists(dirtoas):
            os.makedirs(dirtoas)
        outfile = dirtoas + "/" + ar_psr + "_" + str(ar_mjd) + "_" + ar_tel + "_ToAs.txt"
        f = open(outfile, "w+")
        head = "FORMAT 1"
        f.write('%s\n' % head)
        for i in range(0, np.size(freqf)):
            f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
                    amjdf[i], terrf[i], str_site[0]))
        f.close()
        if not quiet:
            print("done!")

    # Fitting the ToAs with tempo2 for DM
    if not quiet:
        print("\nWriting the ToAs to a temporary file for tempo2 fitting..."),
    outfiletmp = ar_psr + "tmp_ToAs.txt"
    f = open(outfiletmp, "w+")
    head = "FORMAT 1"
    f.write('%s\n' % head)
    for i in range(0, np.size(freqf)):
        f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
                amjdf[i], terrf[i], str_site[0]))
    f.close()
    if not quiet:
        print(" done!\n")

    # performing the fit
    dmstr = os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk \'{print $5,$6}\'"
                     % (ephemeris, outfiletmp)).read()
    (dm, dmerr) = dmstr.split()
    dmval = float(dm)
    dmverr = float(dmerr)
    # doing the fit again to read the chisquare
    chisqstr = os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk \'{print $9}\'"
                        % (ephemeris, outfiletmp)).read()
    fitchisq = float(chisqstr)
    os.remove(outfiletmp)

    # Preparing the data for plotting the residuals, prefit and postfit
    infile = open(ephemeris, "r")
    tmpeph1 = ar_psr + '_tmpeph.eph'
    output = open(tmpeph1, "w+")
    for i, line in enumerate(infile):
        if not line.lstrip().startswith('DM'):
            if not line.lstrip().startswith('DMEPOCH'):
                output.write(line)
    infile.close()
    output.close()
    # updating the ephemeris file with measured DM
    dmline = "DM             " + str(dmval) + "\t1\t" + str(dmverr)
    dmepochline = "DMEPOCH\t " + str(round(ar_mjd, 2))
    f = open(tmpeph1, 'a')
    f.write('%s\n%s\n' % (dmline, dmepochline))
    f.close()

    newarch = ar.clone()
    newarch.tscrunch()
    newarch.set_dispersion_measure(dmval)
    arrtim.set_observation(newarch)
    arrtim.set_standard(std)
    toas1 = arrtim.get_toas()
    toas1_filtered = [x.split()[:5] for x in toas1]
    str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*toas1_filtered)
    freq1 = np.asarray(str_freq1, dtype=np.float64)
    amjd1 = np.asarray(str_mjd1, dtype=np.float64)
    terr1 = np.asarray(str_toaErr1, dtype=np.float64)
    freqnew1 = np.extract(condition1, freq1)
    amjdnew1 = np.extract(condition1, amjd1)
    terrnew1 = np.extract(condition1, terr1)
    tempfile1 = ar_psr + "_tmp1.txt"
    f = open(tempfile1, "w+")
    head = "FORMAT 1\n"
    f.write('%s' % head)
    for i in range(0, np.size(freqnew1)):
        f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i],
                amjdnew1[i], terrnew1[i], str_site1[0]))
    f.close()
    tmp2 = os.popen("tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'"
                    % (tmpeph1, tempfile1)).read()
    os.remove(tempfile1)
    os.remove(tmpeph1)

    # extracting the data from general2 output
    tmp3 = tmp2.split('\n')
    freqtmp2 = np.zeros(np.size(amjdnew1))
    toastmp2 = np.zeros(np.size(amjdnew1))
    TErrtmp2 = np.zeros(np.size(amjdnew1))
    for i in range(np.size(amjdnew1)):
        _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = (tmp3[i].split())
    freqf1 = np.around(np.extract(condition2, freqtmp2), 3)
    amjdf1 = np.extract(condition2, amjdnew1)
    toasf1 = np.extract(condition2, toastmp2)
    terrf1 = np.extract(condition2, TErrtmp2)
    toasf1 *= 1e+6
    postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))

    ar_nbin = newarch.get_nbin()
    ar_nchn = newarch.get_nchan()
    if (narch == 1):
        freq_bot = (ar.get_centre_frequency() - ar_bw / 2.0)
        freq_top = (ar.get_centre_frequency() + ar_bw / 2.0)
    if (narch > 1):
        if (ar_bw == 200.):
            freq_bot = 400.0
            freq_top = 1460.0
        if (ar_bw == 400.):
            freq_bot = 300.0
            freq_top = 1460.0

    # Getting the profile data for plotting
    newarch.dedisperse()
    newarch.remove_baseline()
    profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn, ar_nbin)
    prof = newarch.clone()
    prof.fscrunch()
    profdata1D = prof.get_data().flatten()
    profdata1D /= np.max(profdata1D)
    residDM = init_dm - dmval
    dmcurve = 4.15 * 1000. * residDM * ((1. / (np.min(freqf) / 1000.)**2) - (1. / (freqf / 1000.)**2))
    dmoff = np.median(toasf) - np.median(dmcurve)
    dmcurve += dmoff

    # Now does the actual plotting
    fig = plt.figure(3, figsize=(8, 6))
    fig.subplots_adjust(hspace=0.05)
    ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)
    ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)
    ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)
    ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)
    ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)
    ax0.imshow((np.sqrt(profdata2D**2))**0.5, origin='lower',
               extent=(0, ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')
    ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
    ax0.tick_params(axis='x', which='both', bottom=True, top=True, labelbottom=False)
    ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black', linewidth=0.5)
    ax1.set_xlim(0, ar_nbin - 1)
    ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
    ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
    ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',
                 label='Prefit: Unfiltered', capsize=2)
    ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')
    ax2.set_xlim(freq_bot, freq_top)
    ax2.grid()
    ax2.legend(loc='upper right')
    ax2.axes.xaxis.set_ticklabels([])
    ax3.yaxis.set_label_position("right")
    ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k',
                 label='Prefit: Filtered', capsize=2)
    ax3.set_xlim(freq_bot, freq_top)
    ax3.grid()
    ax3.legend(loc='upper right')
    ax3.axes.xaxis.set_ticklabels([])
    ax3.set_ylabel(r'ToA Residuals ($\mu$s)', fontweight='bold', fontsize=12)
    ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',
                 label='Postfit', capsize=2)
    ax4.set_xlim(freq_bot, freq_top)
    ax4.grid()
    ax4.legend(loc='upper right')
    ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
    fig.suptitle('Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\mu$s; Postfit Wrms: %.2f $\mu$s\nMedian ToA Err: %.2f $\mu$s; DM: %.6f $\pm$ %.6f pc cm$^{-3}$; Reduced $\chi^2$: %.2f'
                 % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms,
                    np.median(terrf1), dmval, dmverr, fitchisq),
                 fontsize=11, fontweight='bold')

    dirplot = os.path.join(pwd, ar_psr + "_" + ar_tel + "_plots")
    if not os.path.exists(dirplot):
        os.makedirs(dirplot)
    plotfile = dirplot + "/" + ar_psr + "_" + str(ar_mjd) + "_" + str(ar_centfr) + "_" + ar_tel + "_DMfitResid.pdf"
    plt.savefig(plotfile, format='pdf')
    plt.close()
    if not quiet:
        print('done!')
    del ar
    return (dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1))


''' Frequency appending the data archives '''
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
    for i in range(narch):
        archives[i].tscrunch()
    # GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2
    # will be dependent on the pulsar period. Default values of this jump given
    # is from the timing of PSR J1643-1224.
    # PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).
    if (archives[0].get_telescope() == 'GMRT'):
        for i in range(narch):
            ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
            ar_frq = archives[i].get_centre_frequency()
            ar_bw = archives[i].get_bandwidth()
            period = (archives[i].get_Integration(0).get_folding_period())
            offset = 0.670520675
            jump = (offset / period) - int(offset / period)
            if (ar_frq >= 1260. and ar_frq < 1460.):
                if (ar_mjd >= 58810. and ar_mjd < 58991.):
                    archives[i].rotate_phase(-jump)

    freq_append = psrchive.FrequencyAppend()
    ttfreq = archives[0].get_centre_frequency()
    if (300. < ttfreq < 500.):
        archives[0].fscrunch(b3scrunch)
    if (1160. < ttfreq < 1460.):
        archives[0].fscrunch(b5scrunch)
    freq_append.init(archives[0])
    while len(archives) > 1:
        ttfreq = archives[1].get_centre_frequency()
        if (300. < ttfreq < 500.):
            archives[1].fscrunch(b3scrunch)
        if (1160. < ttfreq < 1460.):
            archives[1].fscrunch(b5scrunch)
        freq_append.append(archives[0], archives[1])
        del archives[1]
    return (archives[0])


''' Frequency Appending the Templates '''
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
    for i in range(narch):
        archives[i].tscrunch()
    # GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2
    # will be dependent on the pulsar period. Default values of this jump given
    # is from the timing of PSR J1643-1224.
    # PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).
    if (archives[0].get_telescope() == 'GMRT'):
        for i in range(narch):
            ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
            ar_frq = archives[i].get_centre_frequency()
            ar_bw = archives[i].get_bandwidth()
            period = (archives[i].get_Integration(0).get_folding_period())
            offset = 0.670520675
            jump = (offset / period) - int(offset / period)
            if (ar_frq >= 1260. and ar_frq < 1460.):
                if (ar_mjd >= 58810. and ar_mjd < 58991.):
                    archives[i].rotate_phase(-jump)

    freq_append = psrchive.FrequencyAppend()
    ttfreq = archives[0].get_centre_frequency()
    if (300. < ttfreq < 500.):
        archives[0].fscrunch(b3scrunch)
    if (1160. < ttfreq < 1460.):
        archives[0].fscrunch(b5scrunch)
    freq_append.init(archives[0])
    while len(archives) > 1:
        ttfreq = archives[1].get_centre_frequency()
        if (300. < ttfreq < 500.):
            archives[1].fscrunch(b3scrunch)
        if (1160. < ttfreq < 1460.):
            archives[1].fscrunch(b5scrunch)
        freq_append.append(archives[0], archives[1])
        del archives[1]
    return (archives[0])

# ---------------------------------------------------------------------------------- #
main()
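
# --- Illustrative sketch (editor's addition, not part of the original dmcalc.py) --- #
# DMCalc() above rejects bad ToAs by fitting a quadratic-in-frequency model with
# sklearn's HuberRegressor and keeping points within 3*MAD of the fit. The helper
# below, which is never called by the pipeline, reproduces that step on synthetic
# data; every name and number in it is made up purely for illustration.
def _huber_filter_demo():
    from sklearn.linear_model import HuberRegressor
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import PolynomialFeatures
    rng = np.random.RandomState(0)
    freq = np.linspace(300., 500., 64).reshape(-1, 1)   # channel frequencies (MHz)
    toa = 50. + 2e-4 * (freq.ravel() - 400.)**2          # smooth nu^2-like trend (us)
    toa += rng.normal(0., 0.5, freq.size)                # measurement noise
    toa[::13] += 20.                                     # a few gross outliers
    terr = np.full(freq.size, 0.5)                       # ToA uncertainties (us)
    # robust quadratic fit, weighting each point by 1/error as in DMCalc()
    pipe = make_pipeline(PolynomialFeatures(2), HuberRegressor())
    pipe.fit(freq, toa, huberregressor__sample_weight=1. / terr)
    resid = toa - pipe.predict(freq)
    mad = np.median(np.abs(resid - np.median(resid))) / 0.6744897501960817
    keep = np.abs(resid - np.median(resid)) < 3. * mad
    print("kept %d of %d simulated ToAs" % (keep.sum(), keep.size))
# ------------------------------------------------------------------------------------ #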
normal
{ "blob_id": "e464b465c4bc90c250c0ea02c17b7398d975964b", "index": 1163, "step-1": "<mask token>\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. 
**\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... '\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... 
'),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... '),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk 
'{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, 
top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / 
period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n", "step-2": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. 
' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. 
**\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... '\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... 
'),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... '),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk 
'{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, 
top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / 
period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n", "step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nstart = time.time()\nparser = argparse.ArgumentParser(description='Code for measuring in-band ' +\n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. 
' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. 
**\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... '\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... 
'),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... '),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk 
'{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, 
top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / 
period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n", "step-4": "<mask token>\nimport os\nimport sys\nimport numpy as np\nimport psrchive\nimport argparse\nimport time\nimport warnings\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nstart = time.time()\nparser = argparse.ArgumentParser(description='Code for measuring in-band ' +\n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. 
' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. 
**\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... '\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... 
'),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... '),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk 
'{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, 
top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / 
period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n", "step-5": "#!/usr/bin/python\n'''\n ** dmcalc **\nEstimates the Dispersion Measure (DM) from the data in psrfits file format.\n\nReturns the DM value with its uncertainty and reduced chi-square from tempo2 \nDM fit.\n\nDependencies \n-------------\nPSRCHIVE with python interface: http://psrchive.sourceforge.net/\nTEMPO2: https://bitbucket.org/psrsoft/tempo2\nSKLEARN: https://scikit-learn.org/stable/install.html\n\nParameters\n----------\nfile(s) : Input file(s) in psrfits format\n\nephem : Ephemeris (or parameter) file of the pulsar. This is required \n to update the model. It can be given as a command line argument. \n If it is available in \"PWD/ephemerides\" folder, one can use that.\n Giving the file with this option overrides the default one.\n\nmodel : Template profile for cross-correlating with the observation to\n obtain DM. It can be given as a command line argument, otherwise\n it will look for a matching one in \"PWD/ephemerides\" directory\n and if found, will use that instead. One can use this option to\n override the default selection.\n \nfscrunch : int, optional, default: None. Factor for scrunching the frequency \n channels before passing it to DM estimation.\n\nb3fscrunch : int, optional, default: None. Factor for scrunching the BAND3 \n data of uGMRT before passing it to DM estimation.\n\nb3fscrunch : int, optional, default: None. Factor for scrunching the BAND5 \n data of uGMRT before passing it to DM estimation.\n\noffset : float, optional, default: None. Fix for jump between BAND3 and \n BAND5 of uGMRT bands. \n\nwriteout : bool, optional, default: False. Writes out the file corrected \n for DM in a default directory (PWD/PSRJ_{site}_final), using the\n following options to reduce the file.\n\nplot : bool, optional, default: True. Prints the data analysis plot in\n a PDF file. ToA rejection steps and DM corrected ToAs are shown\n in addition to DM corrected frequency evolution of the profile.\n\nptoa : bool, optional, default: False. Prints the outliers cleaned ToAs \n to a file in the TEMPO2 readable format, so that, if required, \n it can be used for other purposes.\n \nFscrunch : bool, optional, default: False. Collapse all frequency channels\n to produce one profile.\n\nTscrunch : bool, optional, default: False. Collapse all sub-integrations\n to produce one profile.\n\ntscrunch : int, optional, default: None. Factor to scrunch sub-integrations\n for writing out the DM corrected file.\n \nquiet : bool, optional, default: False. 
Supresses all print statements\n except warnings and errors.\n\nReturns\n-------\nDispersion Measure with uncertainty.\n\n\nExamples\n--------\n# (a) for DM estimation with files in default directories:\n#\ndmcalc.py inputfile.fits\n#\n# (c) to use different ephemeris and template files:\n#\ndmcalc.py -E ephemeris.par -M model.fits data_file.fits\n#\n# (d) to write the DM corrected fits file and ToAs:\n#\n./dmcalc2.py -w -ptoa inputfile.fits\n\n'''\n\n\n# import modules...\nimport os\nimport sys\nimport numpy as np\nimport psrchive\nimport argparse\nimport time\nimport warnings\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nstart = time.time()\n\nparser = argparse.ArgumentParser(description='Code for measuring in-band '+ \n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, \n\t\t\t\t\thelp='The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, \n\t\t\t\t\thelp='Ephemeris file to update the model. Exits if not '+\n\t\t\t\t\t 'given or is not available in \"PWD/ephemerides\" '+\n\t\t\t\t\t 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str,\n\t\t\t\t\thelp='Model template for ToA generation. Exits if not '+ \n\t\t\t\t\t 'given or is not available in \"PWD/templates\" '+\n\t\t\t\t\t 'directory')\nparser.add_argument('-f','--fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f','--b3fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f','--b5fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w','--writeout', action='store_true',\n\t\t\t\t\thelp='Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa','--print_toas', action='store_true',\n\t\t\t\t\thelp='Print the prefit ToAs to file in tempo2 format. 
'+\n\t\t\t\t\t 'Def: False')\nparser.add_argument('-F','--Fscrunch', action='store_true',\n\t\t\t\t\thelp='Fully scrunch the number of channels for the '+\n\t\t\t\t\t\t 'final output archive (Def: False)')\nparser.add_argument('-T','--Tscrunch', action='store_true',\n\t\t\t\t\thelp='Completely time scrunch all the integrations')\nparser.add_argument('-t','--tscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of integrations for '+ \n\t\t\t\t\t 'the final output archive (Def: None)')\nparser.add_argument('-o','--offset', type=float, default=0.670520675,\n\t\t\t\t\thelp='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', \n\t\t\t\t\t\t\thelp='Only print warnings')\n\n\ndef main():\n\t\n\t# parses the input arguments\n\targs = parser.parse_args()\n\n\t# checks status of quiet and ptoa\n\tquiet=False\n\tif args.quiet:\n\t\tquiet=True\n\ttempo2=True\n\tptoa=False\n\tif args.print_toas:\n\t\tptoa=True\n\t\t\n\tif not quiet:\n\t\tprint(\"Loading the archive files for DM estimation\")\n\n\t# loads the psrfits file\n\tarchives = []\n\tfor filename in args.files:\n\t\tarchives.append(psrchive.Archive_load(filename))\n\tnarch = len(archives)\n\tif narch >= 1:\n\t\tif not quiet:\n\t\t\tprint(\"Appending the archives ...\"),\n\t\t# append data\n\t\tar = freq_appendData(narch, archives, args.offset, \n\t\t\t\t\t\t\targs.b3fscrunch, args.b5fscrunch)\n\t\tif not quiet:\n\t\t\tprint(\" done!\")\n\telse:\n\t\tif not quiet:\n\t\t\tprint(\"Only one archive was given, so nothing to frequency-append.\")\n\t# ar is the final archive after performing frequency append\n\tar = archives[0]\n\tdel archives\n\t\n\t# extracts relevant information from the archive\n\tar_psr = ar.get_source()\n\tar_nbins = ar.get_nbin()\n\tar_tel = ar.get_telescope()\n\tmjd_start=ar.get_Integration(0).get_start_time().in_days()\n\tmjd_end=ar.get_Integration(0).get_end_time().in_days()\n\tar_mjd = mjd_start + (mjd_end-mjd_start)/2.\n\tlength = ar.integration_length()\n\tar.update_centre_frequency()\n\tar_centfr = ar.get_centre_frequency()\n\tar_nchan = ar.get_nchan()\n\tar_bw = ar.get_bandwidth()\n\tar_chnwdth = ar_bw / ar_nchan\n\tffrac = args.fscrunch\n\tif not quiet:\n\t\tprint(\"\\nNow preparing for DM estimation\\n\")\n\n\tpwd=os.getcwd()\n\n\t# checks for ephemeris file and exit if not given or is not available\n\t# in the default directory \"PWD/ephemerides\".\n\tif args.ephem != None:\n\t\tephemeris = args.ephem\n\telse:\n\t\tephemeris = \"ephemerides/\"+ar_psr+\".par\"\n\t\tif not (os.path.exists(ephemeris)):\n\t\t\tsys.exit(1)\n\tif not quiet:\n\t\tprint (\"\\nEphemeris file is:\"+ephemeris+'\\n')\n\t\n\t# if template is given as input argument load and process them\n\tmodel = []\n\tfor filename in args.model:\n\t\tmodel.append(psrchive.Archive_load(filename))\n\tif args.model != None:\n\t\tif len(args.model) == 1:\n\t\t\tmodel = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)\n\t\tif len(args.model) > 1:\n\t\t\tmodel = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)\n\t# If the template is not given, looking for a matching template in the templates directory\n\tif args.model == None:\n\t\tif not quiet:\n\t\t\tprint(\"Looking for matching template in templates directory...\"),\n\t\timport subprocess\n\t\ttempdir=\"templates/*.sm\"\n\t\ttempfile=ar_psr+'_tmp.txt'\n\t\ta=subprocess.call(\"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\"\n\t\t\t\t\t\t\t % (tempdir,tempfile), 
shell=True)\n\n\t\ttempnchan=\"\"\n\t\tt1=str(ar_nbins)\n\t\tif ar_tel=='gmrt':\n\t\t\tt2=str(int(ar_bw))\n\t\telse:\n\t\t\tt2=str((ar_bw))\n\t\tt3=('%.2f'%ar_centfr)\n\t\tf = open(tempfile,'r')\n\t\tfor line in f:\n\t\t\tline = line.strip()\n\t\t\tcolumns=line.split()\n\t\t\tt4 = float(columns[5])\n\t\t\tt4 = ('%.2f'%t4)\n\t\t\tif ar_tel=='gmrt':\n\t\t\t\tif (columns[1]==ar_psr and columns[2]==t1 and str(int(columns[3]))==t2 and t4==t3):\n\t\t\t\t\tmodeltempl=columns[0]\n\t\t\t\t\ttempnchan=columns[4]\n\t\t\t\t\tif not quiet:\n\t\t\t\t\t\tprint (' done\\n')\n\t\t\telse:\n\t\t\t\tif (columns[1]==ar_psr and columns[2]==t1 and str((columns[3]))==t2 and t4==t3):\n\t\t\t\t\tmodeltempl=columns[0]\n\t\t\t\t\ttempnchan=columns[4]\n\t\t\t\t\tif not quiet:\n\t\t\t\t\t\tprint (' done\\n')\n\t\tif modeltempl=='' and tempnchan=='':\n\t\t\t\n\t\t\tprint(\"\\n** No matching template found for DM fitting. Exiting. **\\n\")\n\t\t\tsys.exit(1)\n\t\tf.close()\n\t\tos.remove(tempfile)\n\t\tif not quiet:\n\t\t\tprint(\"Found matching template: \"+modeltempl)\n\t\tmodel.append(psrchive.Archive_load(modeltempl))\n\tif not quiet:\n\t\tprint(\"\\nEstimating the DM from the observation\")\n\tmodel.update_centre_frequency()\n\n\t# cloning the original file for passing to DMCalc() routine\n\tarch = ar.clone()\n\n\t# Calling the DM estimation routine\t\n\tdmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch, ar_nchan, ar_centfr, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ar_bw, ar_psr, ar_tel, ar_mjd, model, \n\t\t\t\t\t\t\t\t\t \t\t\t\t\t ephemeris, pwd, ffrac, quiet, tempo2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ptoa, narch)\n\t\n\t# writing out the final DM corrected file, if requested\n\tif args.writeout:\n\t\t# removing the DM and DMEPOCH from the ephemeris file for uptation\n\t\tinfile = open(ephemeris,\"r\")\n\t\ttmpeph = ar_psr+'.eph'\n\t\toutput = open(tmpeph,\"w+\")\n\t\tfor i, line in enumerate(infile):\n\t\t\tif not line.lstrip().startswith('DM'):\n\t\t\t\t\tif not line.lstrip().startswith('DMEPOCH'):\n\t\t\t\t\t\toutput.write(line)\n\t\tinfile.close()\n\t\toutput.close()\n\t\t# updating the ephemeris file with measured DM\n\t\tdmline = \"DM\t\t\t \"+str(dmval)+\"\\t\\t\"+str(dmverr)\n\t\tdmepochline = \"DMEPOCH\t\t \"+str(round(ar_mjd,2))\n\t\tif not args.quiet:\n\t\t\tprint(\"Updating the ephemeris with new DM... \"),\n\t\tf = open(tmpeph,'a')\n\t\tf.write(\"%s\\n %s\\n\" % (dmline, dmepochline))\n\t\tif not args.quiet:\n\t\t\tprint(\" done!\")\n\t\tf.close()\n\n\t\t# updating the ephemeris in the archive with the measured DM\n\t\tif not quiet:\n\t\t\tprint(\"Correcting the DM of the observed file and writing it out... \"),\n\t\tos.remove(tmpeph)\n\t\t# creating the directory for writing the file\n\t\tdirfinal=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_final\")\n\t\tif not os.path.exists(dirfinal):\n\t\t\tos.makedirs(dirfinal)\n\t\t# filename with path of the DM corrected file\n\t\toutfile = dirfinal+\"/\"+ar_psr + \"_\" + str(ar_mjd) + \"_\" + ar_tel + \".ar\"\n\n\t\t# Setting the DMC flag to 1. 
In other words, doing the DM correction.\n\t\tar.set_dispersion_measure(dmval)\n\t\tar.dedisperse()\n\t\t# Performing different scrunching in the archive for writing out\n\t\tif not args.Tscrunch:\n\t\t\tar.tscrunch(args.tscrunch)\n\t\telse:\n\t\t\tar.tscrunch()\n\t\tif not args.Fscrunch:\n\t\t\tar.fscrunch(ffrac)\n\t\telse:\n\t\t\tar.fscrunch()\n\t\t# Writing out the DM corrected, time/frequency scrunched file.\n\t\tar.unload(outfile)\n\t\tif not args.quiet:\n\t\t\tprint(\" done!\")\n\t\tdel ar\n\t\tif not quiet:\n\t\t\tprint(\"The file is corrected for DM and is written out to\\n\"+outfile)\n\t# Printing the results to the file and also in the terminal\n\tf= open(ar_psr+\"_DM_timeseries.txt\",'a')\n\tf.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' %( filename, \\\n\t\t\tar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err, ar_centfr, \\\n\t\t\tar_bw, ar_tel))\n\tf.close()\n\n\timport time\n\tend = time.time()\n\ttotal = end - start\n\tprint ('-----------------------------------------------------------------------------')\n\tprint ('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n\tprint ('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr, \n\t\t\tfitchisq, ar_centfr, ar_bw, ar_tel) )\n\t\n\tprint ('-----------------------------------------------------------------------------')\n\n\tprint(\"\\nThe program took %.1f seconds to finish\"%total)\n#-------------------------------------------------------------------------------------------#\n\n''' Main function that performs the DM estimation '''\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model, ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch): \n\t# Checks if model file is available.\n\tif model == None:\n\t\tsys.exit(1)\n\tinit_dm = ar.get_dispersion_measure()\n\t# setting up the ToA estimation routine using the psrchive ArrivalTime()\n\tif not quiet:\n\t\tprint(\"Using the ArrivalTime (pat) with PGS in Tempo2 format\")\n\tarrtim = psrchive.ArrivalTime()\n\tarrtim.set_shift_estimator('PGS')\n\tarrtim.set_format('Tempo2')\n\tarrtim.set_format_flags('IPTA')\n\tif not quiet:\n\t\tprint(\"Loading the template file for processing... \"),\n\tstd = model.clone()\n\tstd.pscrunch()\n\tstd.tscrunch()\n\tstd_nchan = std.get_nchan()\n\t\n\tstd.dedisperse()\n\tstd.fscrunch(ffrac)\n\tarrtim.set_standard(std)\n\tif not quiet:\n\t\tprint(\" done!\")\n\tar.fscrunch(ffrac)\n\tar.pscrunch()\n\tar.tscrunch()\n\tarrtim.set_observation(ar)\n\tif not quiet:\n\t\tprint(\"Finding the ToAs... \"),\n\n\t# Finding the ToAs and reading it into numpy arrays\n\ttoas = arrtim.get_toas()\n\ttoas_filtered = [x.split()[:5] for x in toas] \n\tstr_filename,str_freq,str_mjd,str_toaErr,str_site = zip(*toas_filtered)\n\tfreq = np.asarray(str_freq, dtype=np.float64)\n\tamjd = np.asarray(str_mjd, dtype=np.float64)\n\tterr = np.asarray(str_toaErr, dtype=np.float64)\n\tif not quiet:\n\t\tprint(\" done!\")\n\t\tprint(\"Removing the bad ToAs using Huber Regression... 
\"),\n\t# removing the ToAs with zero errors\n\tcondition1 = terr < 3*np.median(terr)\n\tfreqnew = np.extract(condition1,freq)\n\tamjdnew = np.extract(condition1,amjd)\n\tterrnew = np.extract(condition1,terr)\n\t# writing the ToAs to a temporary file for getting the non-phase resolved ToAs using general2\n\ttempfile = ar_psr+\"_tmp.txt\"\n\tf = open(tempfile,\"w+\")\n\thead=\"FORMAT 1\\n\"\n\tf.write('%s' % head)\n\tfor i in range(0,np.size(freqnew)):\n\t\tf.write('%s %.12f %.20f %.8f %s\\n' % \n\t\t\t\t(str_filename[0], freqnew[i], amjdnew[i], terrnew[i], str_site[0]))\n\tf.close()\n\ttmpstr=\"tempo2 -output general2 -f\"\n\ttmp = os.popen(tmpstr+\" %s %s -s \\\"1111111 {freq} {pre} {err}\\n\\\" | grep '1111111'\" %\n\t\t\t\t\t (ephemeris,tempfile)).read()\n\tos.remove(tempfile)\n\n\t# extracting the data from general2 output\n\ttmp1 = tmp.split('\\n')\n\tfreqtmp = np.zeros(np.size(amjdnew))\n\ttoastmp = np.zeros(np.size(amjdnew))\n\tTErrtmp = np.zeros(np.size(amjdnew))\n\tfor i in range(np.size(amjdnew)):\n\t\t_,freqtmp[i],toastmp[i],TErrtmp[i] = (tmp1[i].split())\n\tTErrtmp /= 1e+6\n\t# importing libraries for outlier removal\n\tfrom sklearn import linear_model\n\tfrom sklearn.linear_model import HuberRegressor\n\tfrom sklearn.preprocessing import PolynomialFeatures\n\tfrom sklearn.pipeline import make_pipeline\n\t# changing the shape of frequency array\n\tfreqarr = freqtmp.reshape(-1,1)\n\t# making a nu^2 model and fitting using Huber Regression\n\ttoastmp *= 1e+6\n\ttoashift = (np.min(toastmp)*-1.5)\n\ttoastmp += toashift\n\tTerrtmp = TErrtmp*1e+6\n\tmodel = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n\tmodel.fit(freqarr,toastmp,\n\t\t\t huberregressor__sample_weight=np.ravel(1./Terrtmp))\n\ty_pred = model.predict(freqarr)\n\tresiduals = toastmp - y_pred\n\tmedian = np.median(residuals)\n\tMAD = np.median(np.abs(residuals-np.median(residuals)))/0.6744897501960817\n\t# filtering the good ToAs\n\tcondition2 = (residuals > median - 3*MAD) & (residuals < median + 3*MAD)\n\tfreqf = np.around(np.extract(condition2,freqarr),3)\n\tamjdf = np.extract(condition2,amjdnew)\n\ttoasf = np.extract(condition2,toastmp)\n\tterrf = np.extract(condition2,TErrtmp)\n\tprefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n\t\n\tterrf *= 1e+6\n\tif not quiet:\n\t\tprint(\" done!\")\n\t# writing out the ToAs in proper format\n\tif ptoa:\n\t\tif not quiet:\n\t\t\tprint ('Writing out ToAs into a file in tempo2 format'),\n\t\tdirtoas=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_ToAs\")\n\t\tif not os.path.exists(dirtoas):\n\t\t os.makedirs(dirtoas)\n\t\toutfile=dirtoas+\"/\"+ar_psr+\"_\"+str(ar_mjd)+\"_\"+ar_tel+\"_ToAs.txt\"\n\t\tf = open(outfile,\"w+\")\n\t\thead=\"FORMAT 1\"\n\t\tf.write('%s\\n' % head)\n\t\tfor i in range(0,np.size(freqf)):\n\t\t\tf.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))\n\t\tf.close()\n\t\tif not quiet:\n\t\t\tprint(\"done!\")\n\n\t# Fitting the ToAs with tempo2 for DM\n\tif not quiet:\n\t\tprint(\"\\nWriting the ToAs to a temporary file for tempo2 fitting...\"),\n\toutfiletmp=ar_psr+\"tmp_ToAs.txt\"\n\tf = open(outfiletmp,\"w+\")\n\thead=\"FORMAT 1\"\n\tf.write('%s\\n' % head)\n\tfor i in range(0,np.size(freqf)):\n\t\tf.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))\n\tf.close()\n\tif not quiet:\n\t\tprint(\" done!\\n\")\n\t# performing the fit\n\tdmstr=os.popen(\"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk \\'{print $5,$6}\\'\" \n\t\t\t\t\t% (ephemeris, 
outfiletmp)).read()\n\t(dm, dmerr) = dmstr.split()\n\tdmval = float(dm)\n\tdmverr = float(dmerr)\n\t# doing the fit again to read the chisquare\n\tchisqstr=os.popen(\"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk \\'{print $9}\\'\" \n\t\t\t\t\t% (ephemeris, outfiletmp)).read()\n\tfitchisq = float(chisqstr)\n\tos.remove(outfiletmp)\n\n\t# Preparing the data for plotting the residuals, prefit and postfit\n\tinfile = open(ephemeris,\"r\")\n\ttmpeph1 = ar_psr+'_tmpeph.eph'\n\toutput = open(tmpeph1,\"w+\")\n\tfor i, line in enumerate(infile):\n\t\tif not line.lstrip().startswith('DM'):\n\t\t\t\tif not line.lstrip().startswith('DMEPOCH'):\n\t\t\t\t\toutput.write(line)\n\tinfile.close()\n\toutput.close()\n\t# updating the ephemeris file with measured DM\n\tdmline = \"DM \"+str(dmval)+\"\\t1\\t\"+str(dmverr)\n\tdmepochline = \"DMEPOCH\t \"+str(round(ar_mjd,2))\n\tf = open(tmpeph1,'a')\n\tf.write('%s\\n%s\\n' % (dmline, dmepochline))\n\tf.close()\n\tnewarch = ar.clone()\n\tnewarch.tscrunch()\n\tnewarch.set_dispersion_measure(dmval)\n\tarrtim.set_observation(newarch)\n\tarrtim.set_standard(std)\n\ttoas1 = arrtim.get_toas()\n\ttoas1_filtered = [x.split()[:5] for x in toas1] \n\tstr_filename1,str_freq1,str_mjd1,str_toaErr1,str_site1 = zip(*toas1_filtered)\n\tfreq1 = np.asarray(str_freq1, dtype=np.float64)\n\tamjd1 = np.asarray(str_mjd1, dtype=np.float64)\n\tterr1 = np.asarray(str_toaErr1, dtype=np.float64)\n\tfreqnew1 = np.extract(condition1,freq1)\n\tamjdnew1 = np.extract(condition1,amjd1)\n\tterrnew1 = np.extract(condition1,terr1)\n\ttempfile1 = ar_psr+\"_tmp1.txt\"\n\tf = open(tempfile1,\"w+\")\n\thead=\"FORMAT 1\\n\"\n\tf.write('%s' % head)\n\tfor i in range(0,np.size(freqnew1)):\n\t\tf.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i], amjdnew1[i], terrnew1[i], str_site1[0]))\n\tf.close()\n\n\ttmp2 = os.popen(\"tempo2 -output general2 -f %s %s -s \\\"1111111 {freq} {pre} {err}\\n\\\" | grep '1111111'\" \n\t\t\t\t\t% (tmpeph1,tempfile1)).read()\n\tos.remove(tempfile1)\n\tos.remove(tmpeph1)\n\t# extracting the data from general2 output\n\ttmp3 = tmp2.split('\\n')\n\tfreqtmp2 = np.zeros(np.size(amjdnew1))\n\ttoastmp2 = np.zeros(np.size(amjdnew1))\n\tTErrtmp2 = np.zeros(np.size(amjdnew1))\n\tfor i in range(np.size(amjdnew1)):\n\t\t_,freqtmp2[i],toastmp2[i],TErrtmp2[i] = (tmp3[i].split())\n\tfreqf1 = np.around(np.extract(condition2,freqtmp2),3)\n\tamjdf1 = np.extract(condition2,amjdnew1)\n\ttoasf1 = np.extract(condition2,toastmp2)\n\tterrf1 = np.extract(condition2,TErrtmp2)\n\ttoasf1 *= 1e+6\n\tpostfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n\tar_nbin = newarch.get_nbin()\n\tar_nchn = newarch.get_nchan()\n\tif (narch == 1):\n\t\tfreq_bot = (ar.get_centre_frequency() - ar_bw/2.0)\n\t\tfreq_top = (ar.get_centre_frequency() + ar_bw/2.0)\n\tif (narch > 1):\n\t\tif (ar_bw == 200.):\n\t\t\tfreq_bot = 400.0\n\t\t\tfreq_top = 1460.0\n\t\tif (ar_bw == 400.):\n\t\t\tfreq_bot = 300.0\n\t\t\tfreq_top = 1460.0\n\t# Getting the profile data for plotting\n\tnewarch.dedisperse()\n\tnewarch.remove_baseline()\n\tprofdata2D = newarch.get_data()[:,0,:,:].flatten().reshape(ar_nchn,ar_nbin)\n\tprof = newarch.clone()\n\tprof.fscrunch()\n\tprofdata1D = prof.get_data().flatten()\n\tprofdata1D /= np.max(profdata1D)\n\tresidDM = init_dm - dmval\n\tdmcurve = 4.15 * 1000. 
* residDM * ( (1./(np.min(freqf)/1000.)**2) - (1./(freqf/1000.)**2) )\n\tdmoff = np.median(toasf) - np.median(dmcurve)\n\tdmcurve += dmoff\n\t# Now does the actual plotting\t\n\tfig = plt.figure(3, figsize=(8, 6))\n\tfig.subplots_adjust(hspace=0.05)\n\tax0 = plt.subplot2grid((3, 8), (0,0), rowspan=2, colspan=3)\n\tax1 = plt.subplot2grid((3, 8), (2,0), rowspan=1, colspan=3)\n\tax2 = plt.subplot2grid((3, 8), (0,4), colspan=4)\n\tax3 = plt.subplot2grid((3, 8), (1,4), colspan=4)\n\tax4 = plt.subplot2grid((3, 8), (2,4), colspan=4)\n\tax0.imshow((np.sqrt(profdata2D**2))**0.5, origin='lower', extent=(0,ar_nbin-1,freq_bot,freq_top), aspect='auto', cmap='hot')\n\tax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n\tax0.tick_params(axis='x', which='both', bottom=True, top=True, \n\t\t\tlabelbottom=False)\n\tax1.plot(np.arange(ar_nbin, dtype=float),profdata1D, color='black', linewidth=0.5)\n\tax1.set_xlim(0,ar_nbin-1)\n\tax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n\tax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n\tax2.errorbar(freqtmp, toastmp, yerr=Terrtmp,fmt='.', color='gray', label='Prefit: Unfiltered', capsize=2)\n\tax2.plot(freqtmp, y_pred,'--r', label='Polynomial Fit')\n\tax2.set_xlim(freq_bot, freq_top)\n\tax2.grid()\n\tax2.legend(loc='upper right')\n\tax2.axes.xaxis.set_ticklabels([])\n\tax3.yaxis.set_label_position(\"right\")\n\tax3.errorbar(freqf, toasf-np.median(toasf), terrf,fmt='.k', label='Prefit: Filtered', capsize=2)\n\tax3.set_xlim(freq_bot, freq_top)\n\tax3.grid()\n\tax3.legend(loc='upper right')\n\tax3.axes.xaxis.set_ticklabels([])\n\tax3.set_ylabel(r'ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)\n\tax4.errorbar(freqf1, toasf1-np.median(toasf1), terrf1, fmt='.r', label='Postfit', capsize=2)\n\tax4.set_xlim(freq_bot, freq_top)\n\tax4.grid()\n\tax4.legend(loc='upper right')\n\tax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n\tfig.suptitle('Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s\\nMedian ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f' % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n\tdirplot=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_plots\")\n\tif not os.path.exists(dirplot):\n\t os.makedirs(dirplot)\n\tplotfile=dirplot+\"/\"+ar_psr+\"_\"+str(ar_mjd)+\"_\"+str(ar_centfr)+\"_\"+ar_tel+\"_DMfitResid.pdf\"\n\tplt.savefig(plotfile, format='pdf')\n\tplt.close()\n\tif not quiet:\n\t\tprint ('done!')\n\tdel ar\n\treturn(dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1))\n\n\n''' Frequency appending the data archives '''\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n\n\tfor i in range(narch):\n\t\tarchives[i].tscrunch()\n\t# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2 \n\t# will be dependent on the pulsar period. Default values of this jump given \n\t# is from the timing of PSR J1643-1224. 
\n\t# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).\n\tif (archives[0].get_telescope() == 'GMRT'):\n\t\tfor i in range(narch):\n\t\t\tar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n\t\t\tar_frq = archives[i].get_centre_frequency()\n\t\t\tar_bw = archives[i].get_bandwidth()\n\t\t\tperiod = (archives[i].get_Integration(0).get_folding_period())\n\t\t\toffset = 0.670520675\n\t\t\tjump = (offset/period) - int(offset/period)\n\t\t\tif (ar_frq >= 1260. and ar_frq < 1460.):\n\t\t\t\tif (ar_mjd >=58810. and ar_mjd < 58991.):\n\t\t\t\t\tarchives[i].rotate_phase(-jump)\n\tfreq_append = psrchive.FrequencyAppend()\n\tttfreq = archives[0].get_centre_frequency()\n\tif (300. < ttfreq < 500.):\n\t\tarchives[0].fscrunch(b3scrunch)\n\tif (1160. < ttfreq < 1460.):\n\t\tarchives[0].fscrunch(b5scrunch)\n\n\tfreq_append.init(archives[0])\n\twhile len(archives) > 1:\n\t\tttfreq = archives[1].get_centre_frequency()\n\t\tif (300. < ttfreq < 500.):\n\t\t\tarchives[1].fscrunch(b3scrunch)\n\t\tif (1160. < ttfreq < 1460.):\n\t\t\tarchives[1].fscrunch(b5scrunch)\n\t\t\n\t\tfreq_append.append(archives[0],archives[1])\n\t\tdel archives[1]\n\treturn(archives[0])\n\n''' Frequency Appending the Templates '''\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n\n\tfor i in range(narch):\n\t\tarchives[i].tscrunch()\n\t# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2 \n\t# will be dependent on the pulsar period. Default values of this jump given \n\t# is from the timing of PSR J1643-1224. \n\t# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).\n\tif (archives[0].get_telescope() == 'GMRT'):\n\t\tfor i in range(narch):\n\t\t\tar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n\t\t\tar_frq = archives[i].get_centre_frequency()\n\t\t\tar_bw = archives[i].get_bandwidth()\n\t\t\tperiod = (archives[i].get_Integration(0).get_folding_period())\n\t\t\toffset = 0.670520675\n\t\t\tjump = (offset/period) - int(offset/period)\n\t\t\tif (ar_frq >= 1260. and ar_frq < 1460.):\n\t\t\t\tif (ar_mjd >=58810. and ar_mjd < 58991.):\n\t\t\t\t\tarchives[i].rotate_phase(-jump)\n\n\tfreq_append = psrchive.FrequencyAppend()\n\tttfreq = archives[0].get_centre_frequency()\n\tif (300. < ttfreq < 500.):\n\t\tarchives[0].fscrunch(b3scrunch)\n\tif (1160. < ttfreq < 1460.):\n\t\tarchives[0].fscrunch(b5scrunch)\n\tfreq_append.init(archives[0])\n\twhile len(archives) > 1:\n\t\tttfreq = archives[1].get_centre_frequency()\n\t\tif (300. < ttfreq < 500.):\n\t\t\tarchives[1].fscrunch(b3scrunch)\n\t\tif (1160. < ttfreq < 1460.):\n\t\t\tarchives[1].fscrunch(b5scrunch)\n\t\tfreq_append.append(archives[0],archives[1])\n\t\tdel archives[1]\n\treturn(archives[0])\n\n#----------------------------------------------------------------------------------#\n\nmain()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
<|reserved_special_token_0|> class CreateExtraFeatures(BaseEstimator, TransformerMixin): <|reserved_special_token_0|> <|reserved_special_token_0|> def transform(self, X, y=None): X['hair_soul'] = X['hair_length'] * X['has_soul'] X['flesh_soul'] = X['rotting_flesh'] * X['has_soul'] return np.c_[X] class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class CreateExtraFeatures(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y=None): return self def transform(self, X, y=None): X['hair_soul'] = X['hair_length'] * X['has_soul'] X['flesh_soul'] = X['rotting_flesh'] * X['has_soul'] return np.c_[X] class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(train_set) <|reserved_special_token_0|> print(train_set.describe()) <|reserved_special_token_0|> print(train_set.info()) <|reserved_special_token_0|> class CreateExtraFeatures(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y=None): return self def transform(self, X, y=None): X['hair_soul'] = X['hair_length'] * X['has_soul'] X['flesh_soul'] = X['rotting_flesh'] * X['has_soul'] return np.c_[X] class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] <|reserved_special_token_0|> grid_search.fit(X_train, y_train) print(grid_search.best_estimator_) print(grid_search.best_score_) <|reserved_special_token_0|> submissions.to_csv('submission.csv', index=True) <|reserved_special_token_1|> <|reserved_special_token_0|> train_set = pandas.read_csv('./dataset/train.csv') test_set = pandas.read_csv('./dataset/test.csv') print(train_set) train_set = train_set.drop('id', axis=1) print(train_set.describe()) train_set['type'], categories = train_set['type'].factorize() <|reserved_special_token_0|> print(train_set.info()) <|reserved_special_token_0|> X_train = train_set.drop('type', axis=1) y_train = train_set.get('type') X_train = X_train.append(test_set) <|reserved_special_token_0|> class CreateExtraFeatures(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y=None): return self def transform(self, X, y=None): X['hair_soul'] = X['hair_length'] * X['has_soul'] X['flesh_soul'] = X['rotting_flesh'] * X['has_soul'] return np.c_[X] class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] <|reserved_special_token_0|> num_attributes = ['bone_length', 'rotting_flesh', 'hair_length', 'has_soul'] cat_attributes = ['color'] pipeline_num = Pipeline([('selector', DataFrameSelector(num_attributes)), ( 'extra_feat', CreateExtraFeatures())]) pipeline_cat = Pipeline([('selector', DataFrameSelector(cat_attributes)), ( 'categorical_encoder', OneHotEncoder(sparse=False))]) <|reserved_special_token_0|> full_pipeline = FeatureUnion([('pip,num', pipeline_num), 
('pip_cat', pipeline_cat)]) X_train = full_pipeline.fit_transform(X_train) X_test = X_train[371:] X_train = X_train[:371] <|reserved_special_token_0|> nn_clf = MLPClassifier(max_iter=3000) <|reserved_special_token_0|> grid_params = [{'hidden_layer_sizes': range(3, 20), 'activation': [ 'identity', 'logistic', 'tanh', 'relu'], 'solver': ['lbfgs', 'sgd', 'adam'], 'learning_rate': ['adaptive']}] grid_search = GridSearchCV(nn_clf, param_grid=grid_params, cv=3, verbose=3, n_jobs=-1) grid_search.fit(X_train, y_train) print(grid_search.best_estimator_) print(grid_search.best_score_) y_pred = grid_search.predict(X_test) submissions = pandas.DataFrame(y_pred, index=test_set.id, columns=['type']) submissions['type'] = categories[submissions['type']] submissions.to_csv('submission.csv', index=True) <|reserved_special_token_1|> import pandas import numpy as np train_set = pandas.read_csv("./dataset/train.csv") test_set = pandas.read_csv("./dataset/test.csv") print(train_set) train_set = train_set.drop('id',axis=1) print(train_set.describe()) train_set['type'], categories = train_set['type'].factorize() import matplotlib.pyplot as plt print(train_set.info()) ''' fig = plt.figure(figsize=(10,5)) ax = fig.add_subplot(111) cax = ax.matshow(train_set.corr()) fig.colorbar(cax) ax.set_xticklabels(train_set.columns) ax.set_yticklabels(train_set.columns) plt.show()''' X_train = train_set.drop('type',axis=1) y_train = train_set.get('type') X_train= X_train.append(test_set) #print(X_train.info()) from sklearn.base import BaseEstimator, TransformerMixin class CreateExtraFeatures(BaseEstimator,TransformerMixin): def __init__(self):pass def fit(self,X,y=None): return self def transform(self,X,y=None): X['hair_soul'] = X['hair_length'] * X['has_soul'] X['flesh_soul'] = X['rotting_flesh'] * X['has_soul'] return np.c_[X] class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder num_attributes = ["bone_length","rotting_flesh","hair_length","has_soul"] cat_attributes = ["color"] pipeline_num = Pipeline([ ("selector",DataFrameSelector(num_attributes)), ("extra_feat",CreateExtraFeatures()) ]) pipeline_cat = Pipeline([ ("selector", DataFrameSelector(cat_attributes)), ("categorical_encoder", OneHotEncoder(sparse=False)) ]) from sklearn.pipeline import FeatureUnion full_pipeline = FeatureUnion([ ("pip,num",pipeline_num), ("pip_cat",pipeline_cat) ]) X_train= full_pipeline.fit_transform(X_train) X_test = X_train[371:] X_train = X_train[:371] from sklearn.neural_network import MLPClassifier nn_clf = MLPClassifier(max_iter=3000) from sklearn.model_selection import GridSearchCV grid_params = [{"hidden_layer_sizes":range(3,20), "activation":['identity', 'logistic', 'tanh', 'relu'], "solver":["lbfgs","sgd","adam"],"learning_rate":["adaptive"]}] grid_search = GridSearchCV(nn_clf,param_grid=grid_params,cv=3,verbose=3, n_jobs=-1) grid_search.fit(X_train,y_train) print(grid_search.best_estimator_) print(grid_search.best_score_) #X_test = full_pipeline.fit_transform(test_set[num_attributes],test_set[cat_attributes].values) y_pred = grid_search.predict(X_test) submissions = pandas.DataFrame(y_pred, index=test_set.id,columns=["type"]) submissions["type"] = categories[submissions["type"]] submissions.to_csv('submission.csv', index=True)
flexible
{ "blob_id": "ccedca543fc4dee284a9243317d028ffdeac229d", "index": 2923, "step-1": "<mask token>\n\n\nclass CreateExtraFeatures(BaseEstimator, TransformerMixin):\n <mask token>\n <mask token>\n\n def transform(self, X, y=None):\n X['hair_soul'] = X['hair_length'] * X['has_soul']\n X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']\n return np.c_[X]\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[self.attribute_names]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CreateExtraFeatures(BaseEstimator, TransformerMixin):\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X['hair_soul'] = X['hair_length'] * X['has_soul']\n X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']\n return np.c_[X]\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[self.attribute_names]\n\n\n<mask token>\n", "step-3": "<mask token>\nprint(train_set)\n<mask token>\nprint(train_set.describe())\n<mask token>\nprint(train_set.info())\n<mask token>\n\n\nclass CreateExtraFeatures(BaseEstimator, TransformerMixin):\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X['hair_soul'] = X['hair_length'] * X['has_soul']\n X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']\n return np.c_[X]\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[self.attribute_names]\n\n\n<mask token>\ngrid_search.fit(X_train, y_train)\nprint(grid_search.best_estimator_)\nprint(grid_search.best_score_)\n<mask token>\nsubmissions.to_csv('submission.csv', index=True)\n", "step-4": "<mask token>\ntrain_set = pandas.read_csv('./dataset/train.csv')\ntest_set = pandas.read_csv('./dataset/test.csv')\nprint(train_set)\ntrain_set = train_set.drop('id', axis=1)\nprint(train_set.describe())\ntrain_set['type'], categories = train_set['type'].factorize()\n<mask token>\nprint(train_set.info())\n<mask token>\nX_train = train_set.drop('type', axis=1)\ny_train = train_set.get('type')\nX_train = X_train.append(test_set)\n<mask token>\n\n\nclass CreateExtraFeatures(BaseEstimator, TransformerMixin):\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X['hair_soul'] = X['hair_length'] * X['has_soul']\n X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']\n return np.c_[X]\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[self.attribute_names]\n\n\n<mask token>\nnum_attributes = ['bone_length', 'rotting_flesh', 'hair_length', 'has_soul']\ncat_attributes = ['color']\npipeline_num = Pipeline([('selector', DataFrameSelector(num_attributes)), (\n 'extra_feat', CreateExtraFeatures())])\npipeline_cat = Pipeline([('selector', DataFrameSelector(cat_attributes)), (\n 'categorical_encoder', OneHotEncoder(sparse=False))])\n<mask token>\nfull_pipeline = FeatureUnion([('pip,num', pipeline_num), 
('pip_cat',\n pipeline_cat)])\nX_train = full_pipeline.fit_transform(X_train)\nX_test = X_train[371:]\nX_train = X_train[:371]\n<mask token>\nnn_clf = MLPClassifier(max_iter=3000)\n<mask token>\ngrid_params = [{'hidden_layer_sizes': range(3, 20), 'activation': [\n 'identity', 'logistic', 'tanh', 'relu'], 'solver': ['lbfgs', 'sgd',\n 'adam'], 'learning_rate': ['adaptive']}]\ngrid_search = GridSearchCV(nn_clf, param_grid=grid_params, cv=3, verbose=3,\n n_jobs=-1)\ngrid_search.fit(X_train, y_train)\nprint(grid_search.best_estimator_)\nprint(grid_search.best_score_)\ny_pred = grid_search.predict(X_test)\nsubmissions = pandas.DataFrame(y_pred, index=test_set.id, columns=['type'])\nsubmissions['type'] = categories[submissions['type']]\nsubmissions.to_csv('submission.csv', index=True)\n", "step-5": "import pandas\nimport numpy as np\n\ntrain_set = pandas.read_csv(\"./dataset/train.csv\")\ntest_set = pandas.read_csv(\"./dataset/test.csv\")\nprint(train_set)\ntrain_set = train_set.drop('id',axis=1)\nprint(train_set.describe())\n\ntrain_set['type'], categories = train_set['type'].factorize()\n\nimport matplotlib.pyplot as plt\nprint(train_set.info())\n'''\nfig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111)\ncax = ax.matshow(train_set.corr())\nfig.colorbar(cax)\n\nax.set_xticklabels(train_set.columns)\nax.set_yticklabels(train_set.columns)\n\nplt.show()'''\n\nX_train = train_set.drop('type',axis=1)\ny_train = train_set.get('type')\nX_train= X_train.append(test_set)\n#print(X_train.info())\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nclass CreateExtraFeatures(BaseEstimator,TransformerMixin):\n def __init__(self):pass\n\n def fit(self,X,y=None):\n return self\n def transform(self,X,y=None):\n X['hair_soul'] = X['hair_length'] * X['has_soul']\n X['flesh_soul'] = X['rotting_flesh'] * X['has_soul']\n return np.c_[X]\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n return X[self.attribute_names]\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder\nnum_attributes = [\"bone_length\",\"rotting_flesh\",\"hair_length\",\"has_soul\"]\ncat_attributes = [\"color\"]\n\npipeline_num = Pipeline([\n (\"selector\",DataFrameSelector(num_attributes)),\n (\"extra_feat\",CreateExtraFeatures())\n])\n\npipeline_cat = Pipeline([\n (\"selector\", DataFrameSelector(cat_attributes)),\n (\"categorical_encoder\", OneHotEncoder(sparse=False))\n])\n\nfrom sklearn.pipeline import FeatureUnion\n\nfull_pipeline = FeatureUnion([\n (\"pip,num\",pipeline_num),\n (\"pip_cat\",pipeline_cat)\n])\nX_train= full_pipeline.fit_transform(X_train)\n\nX_test = X_train[371:]\nX_train = X_train[:371]\nfrom sklearn.neural_network import MLPClassifier\n\nnn_clf = MLPClassifier(max_iter=3000)\n\nfrom sklearn.model_selection import GridSearchCV\n\ngrid_params = [{\"hidden_layer_sizes\":range(3,20), \"activation\":['identity', 'logistic', 'tanh', 'relu'], \"solver\":[\"lbfgs\",\"sgd\",\"adam\"],\"learning_rate\":[\"adaptive\"]}]\ngrid_search = GridSearchCV(nn_clf,param_grid=grid_params,cv=3,verbose=3, n_jobs=-1)\n\ngrid_search.fit(X_train,y_train)\n\nprint(grid_search.best_estimator_)\nprint(grid_search.best_score_)\n\n#X_test = full_pipeline.fit_transform(test_set[num_attributes],test_set[cat_attributes].values)\n\n\n\ny_pred = grid_search.predict(X_test)\n\nsubmissions = pandas.DataFrame(y_pred, 
index=test_set.id,columns=[\"type\"])\nsubmissions[\"type\"] = categories[submissions[\"type\"]]\nsubmissions.to_csv('submission.csv', index=True)\n", "step-ids": [ 6, 8, 9, 10, 12 ] }
[ 6, 8, 9, 10, 12 ]
from flask import Flask,Blueprint
from .views import login
from flask_session import Session
import redis


app = Flask(__name__,template_folder='templates',static_url_path='static')
app.debug = True

print('app.root_path===',app.root_path)
print('app.static_url_path===',app.static_url_path)

app.secret_key('uaremyhero')

app.config['SESSION_TYPE'] = 'redis' # session类型为redis
app.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379', password='123123') # 用于连接redis的配置
app.config['SESSION_KEY_PREFIX'] = 'session:' # 保存到session中的值的前缀
app.config['SESSION_PERMANENT'] = False # 如果设置为True,则关闭浏览器session就失效。
app.config['SESSION_USE_SIGNER'] = False # 是否对发送到浏览器上 session:cookie值进行加密
Session(app)


app.register_blueprint(login.login)
app.register_blueprint()
normal
{ "blob_id": "9d2fdf47b5c4b56cc0177a9c0a86b1ed57c88d49", "index": 4151, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\n<mask token>\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n", "step-3": "<mask token>\napp = Flask(__name__, template_folder='templates', static_url_path='static')\napp.debug = True\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\napp.config['SESSION_TYPE'] = 'redis'\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',\n password='123123')\napp.config['SESSION_KEY_PREFIX'] = 'session:'\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_USE_SIGNER'] = False\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n", "step-4": "from flask import Flask, Blueprint\nfrom .views import login\nfrom flask_session import Session\nimport redis\napp = Flask(__name__, template_folder='templates', static_url_path='static')\napp.debug = True\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\napp.config['SESSION_TYPE'] = 'redis'\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',\n password='123123')\napp.config['SESSION_KEY_PREFIX'] = 'session:'\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_USE_SIGNER'] = False\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n", "step-5": "from flask import Flask,Blueprint\nfrom .views import login\nfrom flask_session import Session\nimport redis\n\n\napp = Flask(__name__,template_folder='templates',static_url_path='static')\napp.debug = True\n\nprint('app.root_path===',app.root_path)\nprint('app.static_url_path===',app.static_url_path)\n\napp.secret_key('uaremyhero')\n\napp.config['SESSION_TYPE'] = 'redis' # session类型为redis\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379', password='123123') # 用于连接redis的配置\napp.config['SESSION_KEY_PREFIX'] = 'session:' # 保存到session中的值的前缀\napp.config['SESSION_PERMANENT'] = False # 如果设置为True,则关闭浏览器session就失效。\napp.config['SESSION_USE_SIGNER'] = False # 是否对发送到浏览器上 session:cookie值进行加密\nSession(app)\n\n\n\napp.register_blueprint(login.login)\napp.register_blueprint()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'
psi = -0.005846744654920276
hkl2Q = array([[-0.65520642, 0.93819023, 0. ],
       [ 0.66340068, 0.4633009 , -0.80916512],
       [-0.66340068, -0.4633009 , -0.80916512]])
pp = array([-0.88585691, 2.86622706, -0.61241657])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0071883434093180376
Q = array([ 4.75696626, -3.03446862, 0.64836415])
E = 8.4494171829103024
hkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])
urc.run(
    beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
    Q, E, hkl_projection, Nbuffer=100000)
normal
{ "blob_id": "de286b94e09db477e3d920a9eff1a299474baf20", "index": 2614, "step-1": "<mask token>\n", "step-2": "<mask token>\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-3": "<mask token>\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-4": "import mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-5": "#!/usr/bin/env python\nimport mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0. 
],\n [ 0.66340068, 0.4633009 , -0.80916512],\n [-0.66340068, -0.4633009 , -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))\nt_m2p = 0.0071883434093180376\nQ = array([ 4.75696626, -3.03446862, 0.64836415])\nE = 8.4494171829103024\nhkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])\nurc.run(\n beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,\n Q, E, hkl_projection, Nbuffer=100000)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python3 # -*- coding: utf-8 -*- #Modules externes import os import re import logging import csv import xml.etree.ElementTree as ET from chardet import detect #Modules maison from Abes_Apis_Interface.AbesXml import AbesXml from Alma_Apis_Interface import Alma_Apis_Records from Alma_Apis_Interface import Alma_Apis from logs import logs SERVICE = "Recotation_en_masse" LOGS_LEVEL = 'INFO' LOGS_DIR = os.getenv('LOGS_PATH') LIBRARY_CODE = 1601900000 REGION = 'EU' INSTITUTION = 'ub' API_KEY = os.getenv('PROD_UB_BIB_API') FILE_NAME = 'Dewey 20201218 cotes OE Scoop V3' IN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME) OUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME) CALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME) # get file encoding type def get_encoding_type(file): with open(file, 'rb') as f: rawdata = f.read() return detect(rawdata)['encoding'] def item_change_location(item,location,call): """Change location and remove holdinds infos Arguments: item {str} -- xml response of get item ws location {str} -- new location_code call {str} -- new call Returns: [str] -- mms_id, holding_id, pid """ mms_id, holding_id, pid = item.find(".//mms_id").text, item.find(".//holding_id").text, item.find(".//pid").text item.find(".//item_data/location").text = location # On nettoie la cote présente au niveau de l'exemplaire item.find(".//item_data/alternative_call_number").text = '' item.find(".//item_data/alternative_call_number_type").text = '' # On ne renvoie pas les infos de la holdings holding_data = item.find(".//holding_data") item.remove(holding_data) # Si un autre exemplaire lié à la même notice a déjà été traité if mms_id in processed_record_dict: # Si la localisation était la même que celle de l'exemplaire déjà traité if location_code in processed_record_dict[mms_id]: # Si les cotes sont différentes alors on créé la cote sous l'exemplaire if processed_record_dict[mms_id][location_code] != call: multi_call_report.write("{}\n".format(barcode)) item.find(".//item_data/alternative_call_number").text = call return mms_id, holding_id, pid def update_holding_data(holding,new_call): """Change call (852$$h) and reset call type (852 fiest indicator) Arguments: holding {str} -- response of get holding ws new_call {str} -- new value for call subfield Returns: str -- changed data """ holding_data = ET.fromstring(holding) location_field =holding_data.find(".//datafield[@tag='852']") location_field.set('ind1', ' ') call_subfield = holding_data.find(".//datafield[@tag='852']/subfield[@code='h']") call_subfield.text = new_call return ET.tostring(holding_data) #Init logger logs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL) log_module = logging.getLogger(SERVICE) conf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE) alma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE) #We get all the locations for the library in a dictionnary locations_dict = conf.get_locations(LIBRARY_CODE) log_module.info("Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement".format(LIBRARY_CODE)) report = open(OUT_FILE, "w", encoding='utf-8') report.write("Code-barres\tStatut\tMessage\n") processed_record_dict = {} toprocess_holding_dict = {} multi_call_report = open(CALL_ERROR_FILE, "w", encoding='utf-8') multi_call_report.write("code-barres\n") ###Update item sequence # ###################### from_codec = get_encoding_type(IN_FILE) with open(IN_FILE, 'r', encoding=from_codec, newline='') as f: 
reader = csv.reader(f, delimiter=';') headers = next(reader) # We read the file for row in reader: if len(row) < 2: continue barcode = row[0] # Test if new call is defined if row[1] is None or row[1] == '': log_module.error("{} :: Echec :: pas de cote fournie".format(barcode)) report.write("{}\tErreur Fichier\tPas de cote fournie\n".format(barcode)) continue call = row[1].upper() # Test if new localisation is defined if row[3] is None or row[3] == '': log_module.error("{} :: Echec :: pas de localisation fournie".format(barcode)) report.write("{}\tErreur Fichier\tPas de localisation fournie\n".format(barcode)) continue # log_module.info("{} :: Main :: Début du traitement".format(barcode)) # Transform location label in location code if row[3] not in locations_dict: log_module.error("{} :: Echec :: La localisation {} est inconnue dans Alma".format(barcode,row[3])) report.write("{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n".format(barcode,row[3])) continue location_code = locations_dict[row[3]] log_module.debug("{} :: Succes :: A affecter dans la localisation {}".format(barcode,location_code)) # Get datas item with barcode status, response = alma_api.get_item_with_barcode(barcode) if status == 'Error': log_module.error("{} :: Echec :: {}".format(barcode,response)) report.write("{}\tErreur Retrouve Exemplaire\t{}\n".format(barcode,response)) continue # Change location and remove holdinds infos item = ET.fromstring(response) mms_id, old_holding_id,item_id = item_change_location(item,location_code, call) # log_module.debug("{} :: {} - {} - {}".format(barcode,mms_id,old_holding_id,item_id)) # Upadte item in Alma set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item)) log_module.debug(set_response) if set_status == 'Error': log_module.error("{} :: Echec :: {}".format(barcode,set_response)) report.write("{}\tErreur Mise à jour Exemplaire\t{}\n".format(barcode,set_response)) continue changed_item = ET.fromstring(set_response) new_holding_id = changed_item.find(".//holding_id").text processed_record_dict[mms_id] = { location_code: call } if new_holding_id not in toprocess_holding_dict: toprocess_holding_dict[new_holding_id] = { 'call' : call, 'barcode': barcode } log_module.info("{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}".format(barcode,new_holding_id)) log_module.info("FIN DU DEPLACEMENT DES EXEMPLAIRES") ###Update new holding sequence # ############################ log_module.info("DEBUT DE LA MODIFICATION DES HOLDINGS") for new_holding_id in toprocess_holding_dict.keys(): call = toprocess_holding_dict[new_holding_id]['call'] barcode = toprocess_holding_dict[new_holding_id]['barcode'] # Get new holding get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id) if get_holding_status == 'Error': log_module.error("{} :: Echec :: {}".format(new_holding_id,get_holding_response)) report.write("{}\tErreur Retrouve Holding\t{}\n".format(barcode,get_holding_response)) continue changed_holding = update_holding_data(get_holding_response,call) #Update new Holding in Alma set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding) if set_holding_status == 'Error': log_module.error("{} :: Echec :: {}".format(new_holding_id,set_holding_response)) report.write("{}\tErreur Ecriture Holding\t{}\n".format(barcode,set_holding_response)) continue log_module.debug(set_holding_response) log_module.info("{} :: Succes :: La holding a été mise à 
jour".format(new_holding_id)) report.close multi_call_report.close log_module.info("FIN DU TRAITEMENT")
normal
{ "blob_id": "1f94ef0aae1128089b34fc952766cc3927677cdf", "index": 5698, "step-1": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\n<mask token>\nlog_module.info(\n 'Liste des localisation 
chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\n<mask token>\nreport.write('Code-barres\\tStatut\\tMessage\\n')\n<mask token>\nmulti_call_report.write('code-barres\\n')\n<mask token>\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n 
new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n", "step-3": "<mask token>\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n 
report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n", "step-4": "import os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = 
'/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A 
affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n", "step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#Modules externes\nimport os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\n\n#Modules maison\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\n\nSERVICE = \"Recotation_en_masse\"\n\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\n\nLIBRARY_CODE = 1601900000\n\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\n\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME)\n\n# get file encoding type\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\ndef item_change_location(item,location,call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml 
response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find(\".//mms_id\").text, item.find(\".//holding_id\").text, item.find(\".//pid\").text\n item.find(\".//item_data/location\").text = location\n # On nettoie la cote présente au niveau de l'exemplaire\n item.find(\".//item_data/alternative_call_number\").text = ''\n item.find(\".//item_data/alternative_call_number_type\").text = ''\n # On ne renvoie pas les infos de la holdings\n holding_data = item.find(\".//holding_data\")\n item.remove(holding_data)\n # Si un autre exemplaire lié à la même notice a déjà été traité\n if mms_id in processed_record_dict:\n # Si la localisation était la même que celle de l'exemplaire déjà traité\n if location_code in processed_record_dict[mms_id]:\n # Si les cotes sont différentes alors on créé la cote sous l'exemplaire\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write(\"{}\\n\".format(barcode))\n item.find(\".//item_data/alternative_call_number\").text = call\n return mms_id, holding_id, pid\n\ndef update_holding_data(holding,new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field =holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n#Init logger\nlogs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\n\n\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE)\n\n#We get all the locations for the library in a dictionnary\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\"Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement\".format(LIBRARY_CODE))\n\nreport = open(OUT_FILE, \"w\", encoding='utf-8')\nreport.write(\"Code-barres\\tStatut\\tMessage\\n\")\n\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, \"w\", encoding='utf-8')\nmulti_call_report.write(\"code-barres\\n\")\n\n###Update item sequence\n# ###################### \nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n # We read the file\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n # Test if new call is defined\n if row[1] is None or row[1] == '':\n log_module.error(\"{} :: Echec :: pas de cote fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de cote fournie\\n\".format(barcode))\n continue\n call = row[1].upper()\n # Test if new localisation is defined\n if row[3] is None or row[3] == '':\n log_module.error(\"{} :: Echec :: pas de localisation fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de localisation fournie\\n\".format(barcode))\n continue\n # log_module.info(\"{} :: Main :: Début du traitement\".format(barcode))\n # Transform location label in location code\n if row[3] not in locations_dict:\n log_module.error(\"{} :: Echec :: La localisation {} est 
inconnue dans Alma\".format(barcode,row[3]))\n report.write(\"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\".format(barcode,row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug(\"{} :: Succes :: A affecter dans la localisation {}\".format(barcode,location_code))\n \n\n # Get datas item with barcode\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,response))\n report.write(\"{}\\tErreur Retrouve Exemplaire\\t{}\\n\".format(barcode,response))\n continue\n # Change location and remove holdinds infos\n item = ET.fromstring(response)\n mms_id, old_holding_id,item_id = item_change_location(item,location_code, call)\n # log_module.debug(\"{} :: {} - {} - {}\".format(barcode,mms_id,old_holding_id,item_id))\n # Upadte item in Alma\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,set_response))\n report.write(\"{}\\tErreur Mise à jour Exemplaire\\t{}\\n\".format(barcode,set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find(\".//holding_id\").text\n processed_record_dict[mms_id] = {\n location_code: call\n }\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {\n 'call' : call,\n 'barcode': barcode\n }\n log_module.info(\"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\".format(barcode,new_holding_id))\nlog_module.info(\"FIN DU DEPLACEMENT DES EXEMPLAIRES\")\n\n###Update new holding sequence\n# ############################\nlog_module.info(\"DEBUT DE LA MODIFICATION DES HOLDINGS\")\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n # Get new holding\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id)\n if get_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,get_holding_response))\n report.write(\"{}\\tErreur Retrouve Holding\\t{}\\n\".format(barcode,get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response,call)\n #Update new Holding in Alma\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding)\n if set_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,set_holding_response))\n report.write(\"{}\\tErreur Ecriture Holding\\t{}\\n\".format(barcode,set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info(\"{} :: Succes :: La holding a été mise à jour\".format(new_holding_id))\n\nreport.close\n\nmulti_call_report.close\nlog_module.info(\"FIN DU TRAITEMENT\")\n\n ", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import requests
import json
from termcolor import cprint
from pathlib import Path
import os


def console_check(csl, f):
    if csl == 'playstation-4':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_4.')
    if csl == 'playstation-3':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_3.')
    if csl == 'playstation-2':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_2.')
    if csl == 'playstation':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation.')
    if csl == 'xbox-one':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_One.')
    if csl == 'xbox-360':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_360.')
    if csl == 'switch':
        f.write('\tdbo:computingPlatform dbpedia:Nintendo_Switch.')
    if csl == 'pc':
        f.write('\tdbo:computingPlatform dbpedia:Computer.')
    f.write('\n\n')


def initial_warnings():
    cprint("Esse programa funciona usando uma API chamada Chicken Coop API.", "red", attrs=['bold'])
    cprint("Essa API pega informações sobre jogos de determinados consoles.", "red", attrs=['bold'])
    cprint("Para que ela rode corretamente, siga as seguintes instruções:", "cyan", attrs=['bold'])
    cprint("Consoles:", 'yellow', attrs=['bold'])
    cprint("    Playstation 4 -> playstation-4", "green", attrs=['bold'])
    cprint("    Xbox One -> xbox-one", "green", attrs=['bold'])
    cprint("    Computador -> pc", "green", attrs=['bold'])
    cprint("    Nintendo Switch -> switch", "green", attrs=['bold'])
    cprint("Exemplos de jogos: ", 'yellow', attrs=['bold'])
    cprint("    Uncharted: The Lost Legacy", "green", attrs=['bold'])
    cprint("    God of War", "green", attrs=['bold'])
    cprint("    Ori and The Blind Forest", "green", attrs=['bold'])
    cprint("Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada,"
           " caso contrário, não funcionará!", 'magenta', attrs=['bold'])
    print("\n")


def get_and_write(mc, csl):
    print(f"Title: {mc['result']['title']}")
    print(f"Release Date: {mc['result']['releaseDate']}")
    # print(f"Description: {mc['result']['description']}")
    print(f"Score: {mc['result']['score']}")
    # print(f"Rating: {mc['result']['rating']}")
    print(f"Developer: {mc['result']['developer']}\n")
    mc_title = mc['result']['title']
    # mc_description = mc['result']['description']
    mc_score = mc['result']['score']
    mc_developer = mc['result']['developer']
    rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)
    if rsp:
        write_file(mc_title, mc_score, mc_developer, mc, csl)


def write_file(title, score, developer, mc, csl):
    source = "<https://www.metacritic.com/game/"
    aux_title = ''
    source = source + csl + '/'
    path = Path('gamedeflib_rdf.ttl')
    if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:
        file = open('gamedeflib_rdf.ttl', 'r')
        count = 1
        for element in file:
            jogo = f'_:game{count}\n'
            if element == jogo:
                count = count + 1
        file.close()
        file = open('gamedeflib_rdf.ttl', 'a+')
        file.write(f'\n_:game{count}\n')
        file.write(f'\trdfs:label "{title}";\n')
        file.write(f'\tdbp:score {score};\n')
        genre_number(mc, file)
        publisher_number(mc, file)
        file.write(f'\tdbo:developer "{developer}";\n')
        aux_title = title.lower()
        aux_title = aux_title.replace(":", "")
        aux_title = aux_title.replace(" ", "-")
        source = source + aux_title + ">"
        file.write(f'\tdc:source {source};\n')
        console_check(csl, file)
        file.close()
    else:
        file = open('gamedeflib_rdf.ttl', 'w+')
        file.write("@prefix dc: \t<http://purl.org/dc/elements/1.1/> .\n")
        file.write("@prefix rdf:\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n")
        file.write("@prefix rdfs:\t<http://www.w3.org/2000/01/rdf-schema#> .\n")
        file.write("@prefix foaf:\t<http://xmlns.com/foaf/0.1/> .\n")
        file.write("@prefix dbo: <http://dbpedia.org/ontology/> .\n")
        file.write("@prefix dbpedia: <http://dbpedia.org/page/> .\n")
        file.write("@prefix dbp: <http://dbpedia.org/property/> .\n")
        file.write('dbpedia:PlayStation_4\n'
                   '\tfoaf:name "PlayStation 4";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 4".\n\n')
        file.write('dbpedia:PlayStation_3\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 3".\n\n')
        file.write('dbpedia:PlayStation_2\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 2".\n\n')
        file.write('dbpedia:PlayStation\n'
                   '\tdbp:type dbpedia:Video_game_console;\n'
                   '\trdfs:label "PlayStation".\n\n')
        file.write('dbpedia:XBox_One\n'
                   '\tfoaf:name "XBox One";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "XBox One" .\n\n')
        file.write('dbpedia:XBox_360\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "XBox 360" .\n\n')
        file.write('dbpedia:Nintendo_Switch\n'
                   '\tfoaf:name "New Nintendank New Wii U 2.0+";\n'
                   '\tdbo:type dbpedia:Video_game_hardware;\n'
                   '\trdfs:label "Nintendo Switch" .\n\n')
        file.write('dbpedia:Computer\n'
                   '\tdbp:title "Computer";\n'
                   '\trdf:type dbo:Device;\n'
                   '\trdfs:label "Computer" .\n\n')
    return 1


def genre_number(mc, f):
    tam = len(mc['result']['genre'])
    for x in range(0, tam):
        print(f"Genre number {x+1}: {mc['result']['genre'][x]}")
        aux = mc['result']['genre'][x]
        f.write(f'\tdbo:genre "{aux}";\n')


def publisher_number(mc, f):
    tam = len(mc['result']['publisher'])
    for x in range(0, tam):
        print(f"Publisher number {x + 1}: {mc['result']['publisher'][x]}")
        aux = mc['result']['publisher'][x]
        f.write(f'\tdbo:publisher "{aux}";\n')


def main():
    print('Digite o console do jogo desejado: ', end='')
    console = str(input())
    print('Digite o título do jogo desejado: ', end='')
    title = str(input())
    try:
        url = "https://chicken-coop.p.rapidapi.com/games/"+title

        querystring = {"platform": console}

        headers = {
            'x-rapidapi-host': "chicken-coop.p.rapidapi.com",
            'x-rapidapi-key': "c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26"
        }

        response = requests.request("GET", url, headers=headers, params=querystring)

        metacritic = json.loads(response.text)

        if metacritic['result'] == 'No result':
            print("\nAlguma informação digitada está incorreta. Tente novamente.")
        else:
            get_and_write(metacritic, console)

    except Exception as err:
        print("Algum erro desconhecido ocorreu durante a execucação.\nTente novamente.")
        cprint(err, 'red')


initial_warnings()
main()
while True:
    print('Gostaria de adicionar outro jogo na base RDF: (1 - Sim/0 - Não): ', end='')
    try:
        ans = int(input())
        if ans == 1:
            main()
        elif ans == 0:
            print('Encerrando o script')
            break
        else:
            print('Valor digitado deve ser 0 ou 1.')
    except ValueError as e:
        print('Valor foi inserido incorretamente. Tente denovo.')
        cprint(e, 'red')
normal
{ "blob_id": "b290763362af96f5af03fa31f4936339cef66a1d", "index": 2062, "step-1": "<mask token>\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n cprint(\n 'Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada, caso contrário, não funcionará!'\n , 'magenta', attrs=['bold'])\n print('\\n')\n\n\ndef get_and_write(mc, csl):\n print(f\"Title: {mc['result']['title']}\")\n print(f\"Release Date: {mc['result']['releaseDate']}\")\n print(f\"Score: {mc['result']['score']}\")\n print(f\"Developer: {mc['result']['developer']}\\n\")\n mc_title = mc['result']['title']\n mc_score = mc['result']['score']\n mc_developer = mc['result']['developer']\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\n if rsp:\n write_file(mc_title, mc_score, mc_developer, mc, csl)\n\n\ndef write_file(title, score, developer, mc, csl):\n source = '<https://www.metacritic.com/game/'\n aux_title = ''\n source = source + csl + '/'\n path = Path('gamedeflib_rdf.ttl')\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\n file = open('gamedeflib_rdf.ttl', 'r')\n count = 1\n for element in file:\n jogo = f'_:game{count}\\n'\n if element == jogo:\n count = count + 1\n file.close()\n file = open('gamedeflib_rdf.ttl', 'a+')\n file.write(f'\\n_:game{count}\\n')\n file.write(f'\\trdfs:label \"{title}\";\\n')\n file.write(f'\\tdbp:score {score};\\n')\n genre_number(mc, file)\n publisher_number(mc, file)\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\n aux_title = title.lower()\n aux_title = aux_title.replace(':', '')\n aux_title = aux_title.replace(' ', '-')\n source = source + aux_title + '>'\n file.write(f'\\tdc:source {source};\\n')\n console_check(csl, file)\n file.close()\n else:\n file = open('gamedeflib_rdf.ttl', 'w+')\n file.write('@prefix dc: \\t<http://purl.org/dc/elements/1.1/> .\\n')\n file.write(\n '@prefix rdf:\\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix rdfs:\\t<http://www.w3.org/2000/01/rdf-schema#> .\\n'\n )\n 
file.write('@prefix foaf:\\t<http://xmlns.com/foaf/0.1/> .\\n')\n file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\\n')\n file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\\n')\n file.write('@prefix dbp: <http://dbpedia.org/property/> .\\n')\n file.write(\n \"\"\"dbpedia:PlayStation_4\n\tfoaf:name \"PlayStation 4\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 4\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_3\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 3\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_2\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 2\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation\n\tdbp:type dbpedia:Video_game_console;\n\trdfs:label \"PlayStation\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_One\n\tfoaf:name \"XBox One\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox One\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_360\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox 360\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Nintendo_Switch\n\tfoaf:name \"New Nintendank New Wii U 2.0+\";\n\tdbo:type dbpedia:Video_game_hardware;\n\trdfs:label \"Nintendo Switch\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Computer\n\tdbp:title \"Computer\";\n\trdf:type dbo:Device;\n\trdfs:label \"Computer\" .\n\n\"\"\"\n )\n return 1\n\n\ndef genre_number(mc, f):\n tam = len(mc['result']['genre'])\n for x in range(0, tam):\n print(f\"Genre number {x + 1}: {mc['result']['genre'][x]}\")\n aux = mc['result']['genre'][x]\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\n\n\ndef publisher_number(mc, f):\n tam = len(mc['result']['publisher'])\n for x in range(0, tam):\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\n aux = mc['result']['publisher'][x]\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n 
cprint(\n 'Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada, caso contrário, não funcionará!'\n , 'magenta', attrs=['bold'])\n print('\\n')\n\n\ndef get_and_write(mc, csl):\n print(f\"Title: {mc['result']['title']}\")\n print(f\"Release Date: {mc['result']['releaseDate']}\")\n print(f\"Score: {mc['result']['score']}\")\n print(f\"Developer: {mc['result']['developer']}\\n\")\n mc_title = mc['result']['title']\n mc_score = mc['result']['score']\n mc_developer = mc['result']['developer']\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\n if rsp:\n write_file(mc_title, mc_score, mc_developer, mc, csl)\n\n\ndef write_file(title, score, developer, mc, csl):\n source = '<https://www.metacritic.com/game/'\n aux_title = ''\n source = source + csl + '/'\n path = Path('gamedeflib_rdf.ttl')\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\n file = open('gamedeflib_rdf.ttl', 'r')\n count = 1\n for element in file:\n jogo = f'_:game{count}\\n'\n if element == jogo:\n count = count + 1\n file.close()\n file = open('gamedeflib_rdf.ttl', 'a+')\n file.write(f'\\n_:game{count}\\n')\n file.write(f'\\trdfs:label \"{title}\";\\n')\n file.write(f'\\tdbp:score {score};\\n')\n genre_number(mc, file)\n publisher_number(mc, file)\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\n aux_title = title.lower()\n aux_title = aux_title.replace(':', '')\n aux_title = aux_title.replace(' ', '-')\n source = source + aux_title + '>'\n file.write(f'\\tdc:source {source};\\n')\n console_check(csl, file)\n file.close()\n else:\n file = open('gamedeflib_rdf.ttl', 'w+')\n file.write('@prefix dc: \\t<http://purl.org/dc/elements/1.1/> .\\n')\n file.write(\n '@prefix rdf:\\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix rdfs:\\t<http://www.w3.org/2000/01/rdf-schema#> .\\n'\n )\n file.write('@prefix foaf:\\t<http://xmlns.com/foaf/0.1/> .\\n')\n file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\\n')\n file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\\n')\n file.write('@prefix dbp: <http://dbpedia.org/property/> .\\n')\n file.write(\n \"\"\"dbpedia:PlayStation_4\n\tfoaf:name \"PlayStation 4\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 4\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_3\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 3\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_2\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 2\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation\n\tdbp:type dbpedia:Video_game_console;\n\trdfs:label \"PlayStation\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_One\n\tfoaf:name \"XBox One\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox One\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_360\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox 360\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Nintendo_Switch\n\tfoaf:name \"New Nintendank New Wii U 2.0+\";\n\tdbo:type dbpedia:Video_game_hardware;\n\trdfs:label \"Nintendo Switch\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Computer\n\tdbp:title \"Computer\";\n\trdf:type dbo:Device;\n\trdfs:label \"Computer\" .\n\n\"\"\"\n )\n return 1\n\n\ndef genre_number(mc, f):\n tam = len(mc['result']['genre'])\n for x in range(0, tam):\n print(f\"Genre number {x + 1}: {mc['result']['genre'][x]}\")\n aux = mc['result']['genre'][x]\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\n\n\ndef 
publisher_number(mc, f):\n tam = len(mc['result']['publisher'])\n for x in range(0, tam):\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\n aux = mc['result']['publisher'][x]\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\n\n\ndef main():\n print('Digite o console do jogo desejado: ', end='')\n console = str(input())\n print('Digite o título do jogo desejado: ', end='')\n title = str(input())\n try:\n url = 'https://chicken-coop.p.rapidapi.com/games/' + title\n querystring = {'platform': console}\n headers = {'x-rapidapi-host': 'chicken-coop.p.rapidapi.com',\n 'x-rapidapi-key':\n 'c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n metacritic = json.loads(response.text)\n if metacritic['result'] == 'No result':\n print(\n '\\nAlguma informação digitada está incorreta. Tente novamente.'\n )\n else:\n get_and_write(metacritic, console)\n except Exception as err:\n print(\n 'Algum erro desconhecido ocorreu durante a execucação.\\nTente novamente.'\n )\n cprint(err, 'red')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n cprint(\n 'Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada, caso contrário, não funcionará!'\n , 'magenta', attrs=['bold'])\n print('\\n')\n\n\ndef get_and_write(mc, csl):\n print(f\"Title: {mc['result']['title']}\")\n print(f\"Release Date: {mc['result']['releaseDate']}\")\n print(f\"Score: {mc['result']['score']}\")\n print(f\"Developer: {mc['result']['developer']}\\n\")\n mc_title = mc['result']['title']\n mc_score = mc['result']['score']\n mc_developer = mc['result']['developer']\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\n if rsp:\n write_file(mc_title, mc_score, mc_developer, mc, csl)\n\n\ndef write_file(title, score, developer, mc, csl):\n source = '<https://www.metacritic.com/game/'\n aux_title = ''\n source = source + csl + '/'\n path = 
Path('gamedeflib_rdf.ttl')\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\n file = open('gamedeflib_rdf.ttl', 'r')\n count = 1\n for element in file:\n jogo = f'_:game{count}\\n'\n if element == jogo:\n count = count + 1\n file.close()\n file = open('gamedeflib_rdf.ttl', 'a+')\n file.write(f'\\n_:game{count}\\n')\n file.write(f'\\trdfs:label \"{title}\";\\n')\n file.write(f'\\tdbp:score {score};\\n')\n genre_number(mc, file)\n publisher_number(mc, file)\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\n aux_title = title.lower()\n aux_title = aux_title.replace(':', '')\n aux_title = aux_title.replace(' ', '-')\n source = source + aux_title + '>'\n file.write(f'\\tdc:source {source};\\n')\n console_check(csl, file)\n file.close()\n else:\n file = open('gamedeflib_rdf.ttl', 'w+')\n file.write('@prefix dc: \\t<http://purl.org/dc/elements/1.1/> .\\n')\n file.write(\n '@prefix rdf:\\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix rdfs:\\t<http://www.w3.org/2000/01/rdf-schema#> .\\n'\n )\n file.write('@prefix foaf:\\t<http://xmlns.com/foaf/0.1/> .\\n')\n file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\\n')\n file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\\n')\n file.write('@prefix dbp: <http://dbpedia.org/property/> .\\n')\n file.write(\n \"\"\"dbpedia:PlayStation_4\n\tfoaf:name \"PlayStation 4\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 4\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_3\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 3\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_2\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 2\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation\n\tdbp:type dbpedia:Video_game_console;\n\trdfs:label \"PlayStation\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_One\n\tfoaf:name \"XBox One\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox One\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_360\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox 360\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Nintendo_Switch\n\tfoaf:name \"New Nintendank New Wii U 2.0+\";\n\tdbo:type dbpedia:Video_game_hardware;\n\trdfs:label \"Nintendo Switch\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Computer\n\tdbp:title \"Computer\";\n\trdf:type dbo:Device;\n\trdfs:label \"Computer\" .\n\n\"\"\"\n )\n return 1\n\n\ndef genre_number(mc, f):\n tam = len(mc['result']['genre'])\n for x in range(0, tam):\n print(f\"Genre number {x + 1}: {mc['result']['genre'][x]}\")\n aux = mc['result']['genre'][x]\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\n\n\ndef publisher_number(mc, f):\n tam = len(mc['result']['publisher'])\n for x in range(0, tam):\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\n aux = mc['result']['publisher'][x]\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\n\n\ndef main():\n print('Digite o console do jogo desejado: ', end='')\n console = str(input())\n print('Digite o título do jogo desejado: ', end='')\n title = str(input())\n try:\n url = 'https://chicken-coop.p.rapidapi.com/games/' + title\n querystring = {'platform': console}\n headers = {'x-rapidapi-host': 'chicken-coop.p.rapidapi.com',\n 'x-rapidapi-key':\n 'c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n metacritic = json.loads(response.text)\n if metacritic['result'] == 'No result':\n 
print(\n '\\nAlguma informação digitada está incorreta. Tente novamente.'\n )\n else:\n get_and_write(metacritic, console)\n except Exception as err:\n print(\n 'Algum erro desconhecido ocorreu durante a execucação.\\nTente novamente.'\n )\n cprint(err, 'red')\n\n\ninitial_warnings()\nmain()\nwhile True:\n print('Gostaria de adicionar outro jogo na base RDF: (1 - Sim/0 - Não): ',\n end='')\n try:\n ans = int(input())\n if ans == 1:\n main()\n elif ans == 0:\n print('Encerrando o script')\n break\n else:\n print('Valor digitado deve ser 0 ou 1.')\n except ValueError as e:\n print('Valor foi inserido incorretamente. Tente denovo.')\n cprint(e, 'red')\n", "step-4": "import requests\nimport json\nfrom termcolor import cprint\nfrom pathlib import Path\nimport os\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n cprint(\n 'Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada, caso contrário, não funcionará!'\n , 'magenta', attrs=['bold'])\n print('\\n')\n\n\ndef get_and_write(mc, csl):\n print(f\"Title: {mc['result']['title']}\")\n print(f\"Release Date: {mc['result']['releaseDate']}\")\n print(f\"Score: {mc['result']['score']}\")\n print(f\"Developer: {mc['result']['developer']}\\n\")\n mc_title = mc['result']['title']\n mc_score = mc['result']['score']\n mc_developer = mc['result']['developer']\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\n if rsp:\n write_file(mc_title, mc_score, mc_developer, mc, csl)\n\n\ndef write_file(title, score, developer, mc, csl):\n source = '<https://www.metacritic.com/game/'\n aux_title = ''\n source = source + csl + '/'\n path = Path('gamedeflib_rdf.ttl')\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\n file = open('gamedeflib_rdf.ttl', 'r')\n count = 1\n for element in file:\n jogo = f'_:game{count}\\n'\n if element == jogo:\n count = count + 1\n file.close()\n file = open('gamedeflib_rdf.ttl', 'a+')\n file.write(f'\\n_:game{count}\\n')\n file.write(f'\\trdfs:label 
\"{title}\";\\n')\n file.write(f'\\tdbp:score {score};\\n')\n genre_number(mc, file)\n publisher_number(mc, file)\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\n aux_title = title.lower()\n aux_title = aux_title.replace(':', '')\n aux_title = aux_title.replace(' ', '-')\n source = source + aux_title + '>'\n file.write(f'\\tdc:source {source};\\n')\n console_check(csl, file)\n file.close()\n else:\n file = open('gamedeflib_rdf.ttl', 'w+')\n file.write('@prefix dc: \\t<http://purl.org/dc/elements/1.1/> .\\n')\n file.write(\n '@prefix rdf:\\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix rdfs:\\t<http://www.w3.org/2000/01/rdf-schema#> .\\n'\n )\n file.write('@prefix foaf:\\t<http://xmlns.com/foaf/0.1/> .\\n')\n file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\\n')\n file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\\n')\n file.write('@prefix dbp: <http://dbpedia.org/property/> .\\n')\n file.write(\n \"\"\"dbpedia:PlayStation_4\n\tfoaf:name \"PlayStation 4\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 4\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_3\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 3\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_2\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 2\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation\n\tdbp:type dbpedia:Video_game_console;\n\trdfs:label \"PlayStation\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_One\n\tfoaf:name \"XBox One\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox One\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_360\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox 360\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Nintendo_Switch\n\tfoaf:name \"New Nintendank New Wii U 2.0+\";\n\tdbo:type dbpedia:Video_game_hardware;\n\trdfs:label \"Nintendo Switch\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Computer\n\tdbp:title \"Computer\";\n\trdf:type dbo:Device;\n\trdfs:label \"Computer\" .\n\n\"\"\"\n )\n return 1\n\n\ndef genre_number(mc, f):\n tam = len(mc['result']['genre'])\n for x in range(0, tam):\n print(f\"Genre number {x + 1}: {mc['result']['genre'][x]}\")\n aux = mc['result']['genre'][x]\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\n\n\ndef publisher_number(mc, f):\n tam = len(mc['result']['publisher'])\n for x in range(0, tam):\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\n aux = mc['result']['publisher'][x]\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\n\n\ndef main():\n print('Digite o console do jogo desejado: ', end='')\n console = str(input())\n print('Digite o título do jogo desejado: ', end='')\n title = str(input())\n try:\n url = 'https://chicken-coop.p.rapidapi.com/games/' + title\n querystring = {'platform': console}\n headers = {'x-rapidapi-host': 'chicken-coop.p.rapidapi.com',\n 'x-rapidapi-key':\n 'c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n metacritic = json.loads(response.text)\n if metacritic['result'] == 'No result':\n print(\n '\\nAlguma informação digitada está incorreta. 
Tente novamente.'\n )\n else:\n get_and_write(metacritic, console)\n except Exception as err:\n print(\n 'Algum erro desconhecido ocorreu durante a execucação.\\nTente novamente.'\n )\n cprint(err, 'red')\n\n\ninitial_warnings()\nmain()\nwhile True:\n print('Gostaria de adicionar outro jogo na base RDF: (1 - Sim/0 - Não): ',\n end='')\n try:\n ans = int(input())\n if ans == 1:\n main()\n elif ans == 0:\n print('Encerrando o script')\n break\n else:\n print('Valor digitado deve ser 0 ou 1.')\n except ValueError as e:\n print('Valor foi inserido incorretamente. Tente denovo.')\n cprint(e, 'red')\n", "step-5": "import requests\r\nimport json\r\nfrom termcolor import cprint\r\nfrom pathlib import Path\r\nimport os\r\n\r\n\r\ndef console_check(csl, f):\r\n if csl == 'playstation-4':\r\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\r\n if csl == 'playstation-3':\r\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\r\n if csl == 'playstation-2':\r\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\r\n if csl == 'playstation':\r\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\r\n if csl == 'xbox-one':\r\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\r\n if csl == 'xbox-360':\r\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\r\n if csl == 'switch':\r\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\r\n if csl == 'pc':\r\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\r\n f.write('\\n\\n')\r\n\r\n\r\ndef initial_warnings():\r\n cprint(\"Esse programa funciona usando uma API chamada Chicken Coop API.\", \"red\", attrs=['bold'])\r\n cprint(\"Essa API pega informações sobre jogos de determinados consoles.\", \"red\", attrs=['bold'])\r\n cprint(\"Para que ela rode corretamente, siga as seguintes instruções:\", \"cyan\", attrs=['bold'])\r\n cprint(\"Consoles:\", 'yellow', attrs=['bold'])\r\n cprint(\" Playstation 4 -> playstation-4\", \"green\", attrs=['bold'])\r\n cprint(\" Xbox One -> xbox-one\", \"green\", attrs=['bold'])\r\n cprint(\" Computador -> pc\", \"green\", attrs=['bold'])\r\n cprint(\" Nintendo Switch -> switch\", \"green\", attrs=['bold'])\r\n cprint(\"Exemplos de jogos: \", 'yellow', attrs=['bold'])\r\n cprint(\" Uncharted: The Lost Legacy\", \"green\", attrs=['bold'])\r\n cprint(\" God of War\", \"green\", attrs=['bold'])\r\n cprint(\" Ori and The Blind Forest\", \"green\", attrs=['bold'])\r\n cprint(\"Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada,\"\r\n \" caso contrário, não funcionará!\", 'magenta', attrs=['bold'])\r\n print(\"\\n\")\r\n\r\n\r\ndef get_and_write(mc, csl):\r\n print(f\"Title: {mc['result']['title']}\")\r\n print(f\"Release Date: {mc['result']['releaseDate']}\")\r\n # print(f\"Description: {mc['result']['description']}\")\r\n print(f\"Score: {mc['result']['score']}\")\r\n # print(f\"Rating: {mc['result']['rating']}\")\r\n print(f\"Developer: {mc['result']['developer']}\\n\")\r\n mc_title = mc['result']['title']\r\n # mc_description = mc['result']['description']\r\n mc_score = mc['result']['score']\r\n mc_developer = mc['result']['developer']\r\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\r\n if rsp:\r\n write_file(mc_title, mc_score, mc_developer, mc, csl)\r\n\r\n\r\ndef write_file(title, score, developer, mc, csl):\r\n source = \"<https://www.metacritic.com/game/\"\r\n aux_title = ''\r\n source = source + csl + '/'\r\n path = Path('gamedeflib_rdf.ttl')\r\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\r\n file 
= open('gamedeflib_rdf.ttl', 'r')\r\n count = 1\r\n for element in file:\r\n jogo = f'_:game{count}\\n'\r\n if element == jogo:\r\n count = count + 1\r\n file.close()\r\n file = open('gamedeflib_rdf.ttl', 'a+')\r\n file.write(f'\\n_:game{count}\\n')\r\n file.write(f'\\trdfs:label \"{title}\";\\n')\r\n file.write(f'\\tdbp:score {score};\\n')\r\n genre_number(mc, file)\r\n publisher_number(mc, file)\r\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\r\n aux_title = title.lower()\r\n aux_title = aux_title.replace(\":\", \"\")\r\n aux_title = aux_title.replace(\" \", \"-\")\r\n source = source + aux_title + \">\"\r\n file.write(f'\\tdc:source {source};\\n')\r\n console_check(csl, file)\r\n file.close()\r\n else:\r\n file = open('gamedeflib_rdf.ttl', 'w+')\r\n file.write(\"@prefix dc: \t<http://purl.org/dc/elements/1.1/> .\\n\")\r\n file.write(\"@prefix rdf:\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n\")\r\n file.write(\"@prefix rdfs:\t<http://www.w3.org/2000/01/rdf-schema#> .\\n\")\r\n file.write(\"@prefix foaf:\t<http://xmlns.com/foaf/0.1/> .\\n\")\r\n file.write(\"@prefix dbo: <http://dbpedia.org/ontology/> .\\n\")\r\n file.write(\"@prefix dbpedia: <http://dbpedia.org/page/> .\\n\")\r\n file.write(\"@prefix dbp: <http://dbpedia.org/property/> .\\n\")\r\n file.write('dbpedia:PlayStation_4\\n'\r\n '\\tfoaf:name \"PlayStation 4\";\\n'\r\n '\\tdbo:type dbpedia:Home_video_game_console;\\n'\r\n '\\trdfs:label \"PlayStation 4\".\\n\\n')\r\n file.write('dbpedia:PlayStation_3\\n'\r\n '\\tdbo:type dbpedia:Home_video_game_console;\\n'\r\n '\\trdfs:label \"PlayStation 3\".\\n\\n')\r\n file.write('dbpedia:PlayStation_2\\n'\r\n '\\tdbo:type dbpedia:Home_video_game_console;\\n'\r\n '\\trdfs:label \"PlayStation 2\".\\n\\n')\r\n file.write('dbpedia:PlayStation\\n'\r\n '\\tdbp:type dbpedia:Video_game_console;\\n'\r\n '\\trdfs:label \"PlayStation\".\\n\\n')\r\n file.write('dbpedia:XBox_One\\n'\r\n '\\tfoaf:name \"XBox One\";\\n'\r\n '\\tdbo:type dbpedia:Home_video_game_console;\\n'\r\n '\\trdfs:label \"XBox One\" .\\n\\n')\r\n file.write('dbpedia:XBox_360\\n'\r\n '\\tdbo:type dbpedia:Home_video_game_console;\\n'\r\n '\\trdfs:label \"XBox 360\" .\\n\\n')\r\n file.write('dbpedia:Nintendo_Switch\\n'\r\n '\\tfoaf:name \"New Nintendank New Wii U 2.0+\";\\n'\r\n '\\tdbo:type dbpedia:Video_game_hardware;\\n'\r\n '\\trdfs:label \"Nintendo Switch\" .\\n\\n')\r\n file.write('dbpedia:Computer\\n'\r\n '\\tdbp:title \"Computer\";\\n'\r\n '\\trdf:type dbo:Device;\\n'\r\n '\\trdfs:label \"Computer\" .\\n\\n')\r\n return 1\r\n\r\n\r\ndef genre_number(mc, f):\r\n tam = len(mc['result']['genre'])\r\n for x in range(0, tam):\r\n print(f\"Genre number {x+1}: {mc['result']['genre'][x]}\")\r\n aux = mc['result']['genre'][x]\r\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\r\n\r\n\r\ndef publisher_number(mc, f):\r\n tam = len(mc['result']['publisher'])\r\n for x in range(0, tam):\r\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\r\n aux = mc['result']['publisher'][x]\r\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\r\n\r\n\r\ndef main():\r\n print('Digite o console do jogo desejado: ', end='')\r\n console = str(input())\r\n print('Digite o título do jogo desejado: ', end='')\r\n title = str(input())\r\n try:\r\n url = \"https://chicken-coop.p.rapidapi.com/games/\"+title\r\n\r\n querystring = {\"platform\": console}\r\n\r\n headers = {\r\n 'x-rapidapi-host': \"chicken-coop.p.rapidapi.com\",\r\n 'x-rapidapi-key': \"c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26\"\r\n }\r\n\r\n response = 
requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n metacritic = json.loads(response.text)\r\n\r\n if metacritic['result'] == 'No result':\r\n print(\"\\nAlguma informação digitada está incorreta. Tente novamente.\")\r\n else:\r\n get_and_write(metacritic, console)\r\n\r\n except Exception as err:\r\n print(\"Algum erro desconhecido ocorreu durante a execucação.\\nTente novamente.\")\r\n cprint(err, 'red')\r\n\r\n\r\ninitial_warnings()\r\nmain()\r\nwhile True:\r\n print('Gostaria de adicionar outro jogo na base RDF: (1 - Sim/0 - Não): ', end='')\r\n try:\r\n ans = int(input())\r\n if ans == 1:\r\n main()\r\n elif ans == 0:\r\n print('Encerrando o script')\r\n break\r\n else:\r\n print('Valor digitado deve ser 0 ou 1.')\r\n except ValueError as e:\r\n print('Valor foi inserido incorretamente. Tente denovo.')\r\n cprint(e, 'red')\r\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
from .base import BaseEngine
import re


class YandexSearch(BaseEngine):
    base_url = "https://yandex.com"
    search_url = "https://yandex.com/search/"

    def get_params(self, query, **params):
        params["text"] = query
        params["p"] = None
        return params

    def next_url(self, soup):
        if (regex := re.findall(r'"(/search/\?[^>]+p=[^"]+)', str(soup))):
            return self.base_url + regex[-1]

    def parse_soup(self, soup):
        for raw in soup.find_all('li', class_="serp-item"):
            if (url := raw.a.get("href")):
                yield url

    def captcha(self, response):
        return "showcaptcha" in response.url
normal
{ "blob_id": "0ec3ca0f952dbc09c7a7a3e746c0aeab28ee9834", "index": 6498, "step-1": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n <mask token>\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n", "step-2": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n", "step-3": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n", "step-4": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n", "step-5": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = \"https://yandex.com\"\n search_url = \"https://yandex.com/search/\"\n\n def get_params(self, query, **params):\n params[\"text\"] = query\n params[\"p\"] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall(r'\"(/search/\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_=\"serp-item\"):\n if (url := raw.a.get(\"href\")):\n yield url\n\n def captcha(self, response):\n return \"showcaptcha\" in response.url\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
#!/usr/bin/env python

# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#
# Usage: mockprogram.py [any arguments]
#
# Mock program that takes input arguments and produces stdout by reading from
# a file .mockprogram_inout.txt in the current directory or the file specified
# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any
# directory). This script is used to take the place of real commands during a
# test that involves calling commands on the commandline.
#
# The file .mockprogram_inout.txt (or pointed to by
# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:
#
# MOCK_PROGRAM_INPUT: <args_1>
# MOCK_PROGRAM_RETURN: <rtn>
# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>
# <outline_1_line_2>
# ...
# MOCK_PROGRAM_INPUT: <args_2>
#
# The program reads in the blocks starting at the time and removes the block
# from the file after it runs. After all of the blocks are read in, if run
# again it will error out with error code 2.
#
# This program can be used, for example, to simulate git command. For
# example, a couple of git commits might be simulated like:
#
# MOCK_PROGRAM_INPUT: log -1
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: This is the summary line
#
# The is the body of the commit msg
# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: file_name_1.txt
# file_name_2.txt
# file_name_3.txt
#

import sys
import os

inputArgs = ' '.join(sys.argv[1:])
#print("inputArgs = '" + inputArgs + "'"

if os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE"):
  mockProgramInOutFilePath=os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE")
else:
  mockProgramInOutFilePath='.mockprogram_inout.txt'

if not os.path.exists(mockProgramInOutFilePath):
  print("Error: "+mockProgramInOutFilePath+" is missing!")
  sys.exit(1)

mockprogramInout = open(mockProgramInOutFilePath, 'r').read()
mockprogramInoutArray = mockprogramInout.splitlines()
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == "":
  mockprogramInoutArray = mockprogramInoutArray[:-1]

if len(mockprogramInoutArray) < 3:
  print("Error: "+mockProgramInOutFilePath+" has less than three lines:\n"
        "-------------\n" + mockprogramInout + "-------------")
  sys.exit(2)

# Assert input
expectedInputLine = mockprogramInoutArray[0]
if expectedInputLine.find("MOCK_PROGRAM_INPUT:") != 0:
  print("Error, first line = '" + expectedInputLine + "', does not match "
        "^MOCK_PROGRAM_INPUT:")
  sys.exit(3)
expectedInput = expectedInputLine.replace("MOCK_PROGRAM_INPUT:", "").strip()
if inputArgs != expectedInput:
  print("Error, input args='" + inputArgs + "' does not match expected='" +
        expectedInput + "'")
  sys.exit(4)

# Get return code
returnCodeLine = mockprogramInoutArray[1]
if returnCodeLine.find("MOCK_PROGRAM_RETURN:") != 0:
  print("Error, second line = '" + returnCodeLine + "', does not match "
        "^MOCK_PROGRAM_RETURN:")
  sys.exit(5)
returnCode = returnCodeLine.replace("MOCK_PROGRAM_RETURN:", "").strip()

# Get output (can be multi-line)
outputLine = mockprogramInoutArray[2]
if outputLine.find("MOCK_PROGRAM_OUTPUT:") != 0:
  print("Error, third line = '" + outputLine + "', does not match "
        "^MOCK_PROGRAM_OUTPUT:")
  sys.exit(6)
outputStr = outputLine.replace("MOCK_PROGRAM_OUTPUT: ", "")
numLinesOuput = 1
if len(mockprogramInoutArray) > 3:
  for line in mockprogramInoutArray[3:]:
    if line.find("MOCK_PROGRAM_INPUT:") == 0:
      break
    outputStr = outputStr+"\n"+line
    numLinesOuput = numLinesOuput + 1
print(outputStr)

# Write the remaining lines back into the file
lineLineIndex = 2 + numLinesOuput
if len(mockprogramInoutArray) > lineLineIndex:
  open(mockProgramInOutFilePath, 'w').write(
    ('\n'.join(mockprogramInoutArray[lineLineIndex:]))+"\n" )
else:
  open(mockProgramInOutFilePath, 'w').write("")

# Return exit code
sys.exit(int(returnCode))
normal
{ "blob_id": "550f5ad4fef77d5795db0393ae0701f679143e72", "index": 221, "step-1": "<mask token>\n", "step-2": "<mask token>\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\n<mask token>\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\n<mask token>\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\n<mask token>\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n<mask token>\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\n<mask token>\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\n<mask token>\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n<mask token>\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n", "step-3": "<mask token>\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = 
mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n", "step-4": "import sys\nimport os\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n", "step-5": "#!/usr/bin/env python\n\n# @HEADER\n# ************************************************************************\n#\n# TriBITS: Tribal Build, Integrate, and Test System\n# Copyright 2013 Sandia Corporation\n#\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. 
Government retains certain rights in this software.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the Corporation nor the names of the\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION \"AS IS\" AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# ************************************************************************\n# @HEADER\n\n#\n# Usage: mockprogram.py [any arguments]\n#\n# Mock program that takes input arguments and produces stdout by reading from\n# a file .mockprogram_inout.txt in the current directory or the file specified\n# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any\n# directory). This script is used to take the place of real commands during a\n# test that involves calling commands on the commandline.\n#\n# The file .mockprogram_inout.txt (or pointed to by\n# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:\n#\n# MOCK_PROGRAM_INPUT: <args_1>\n# MOCK_PROGRAM_RETURN: <rtn>\n# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>\n# <outline_1_line_2>\n# ...\n# MOCK_PROGRAM_INPUT: <args_2>\n#\n# The program reads in the blocks starting at the time and removes the block\n# from the file after it runs. After all of the blocks are read in, if run\n# again it will error out with error code 2.\n#\n# This program can be used, for example, to simulate git command. 
For\n# example, a couple of git commits might be simulated like:\n#\n# MOCK_PROGRAM_INPUT: log -1\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: This is the summary line\n#\n# The is the body of the commit msg\n# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: file_name_1.txt\n# file_name_2.txt\n# file_name_3.txt\n\n#\n\nimport sys\nimport os\n\ninputArgs = ' '.join(sys.argv[1:])\n#print(\"inputArgs = '\" + inputArgs + \"'\"\n\nif os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\"):\n mockProgramInOutFilePath=os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\")\nelse:\n mockProgramInOutFilePath='.mockprogram_inout.txt'\n\nif not os.path.exists(mockProgramInOutFilePath):\n print(\"Error: \"+mockProgramInOutFilePath+\" is missing!\")\n sys.exit(1)\n\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == \"\":\n mockprogramInoutArray = mockprogramInoutArray[:-1]\n\nif len(mockprogramInoutArray) < 3:\n print(\"Error: \"+mockProgramInOutFilePath+\" has less than three lines:\\n\"\n \"-------------\\n\" + mockprogramInout + \"-------------\")\n sys.exit(2)\n\n# Assert input\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find(\"MOCK_PROGRAM_INPUT:\") != 0:\n print(\"Error, first line = '\" + expectedInputLine + \"', does not match \"\n \"^MOCK_PROGRAM_INPUT:\") \n sys.exit(3)\nexpectedInput = expectedInputLine.replace(\"MOCK_PROGRAM_INPUT:\", \"\").strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n\n# Get return code\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find(\"MOCK_PROGRAM_RETURN:\") != 0:\n print(\"Error, second line = '\" + returnCodeLine + \"', does not match \"\n \"^MOCK_PROGRAM_RETURN:\") \n sys.exit(5)\nreturnCode = returnCodeLine.replace(\"MOCK_PROGRAM_RETURN:\", \"\").strip()\n\n# Get output (can be multi-line)\noutputLine = mockprogramInoutArray[2]\nif outputLine.find(\"MOCK_PROGRAM_OUTPUT:\") != 0:\n print(\"Error, third line = '\" + outputLine + \"', does not match \"\n \"^MOCK_PROGRAM_OUTPUT:\") \n sys.exit(6)\noutputStr = outputLine.replace(\"MOCK_PROGRAM_OUTPUT: \", \"\")\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find(\"MOCK_PROGRAM_INPUT:\") == 0:\n break\n outputStr = outputStr+\"\\n\"+line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n\n# Write the remaining lines back into the file\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write(\n ('\\n'.join(mockprogramInoutArray[lineLineIndex:]))+\"\\n\" )\nelse:\n open(mockProgramInOutFilePath, 'w').write(\"\")\n\n# Return exit code\nsys.exit(int(returnCode))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
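Editor's note: the mockprogram.py sample in the row above reads expectation blocks from .mockprogram_inout.txt and replays them one at a time. The sketch below is illustrative only — it seeds a single block and then invokes the script, assuming the sample is saved as ./mockprogram.py; the block contents are invented test data.

# Illustrative driver for the mockprogram.py sample above (assumption: the
# script is saved as ./mockprogram.py). Seeds one expectation block, then
# calls the mock exactly the way the real command would be called.
import subprocess
import sys

block = (
    "MOCK_PROGRAM_INPUT: log -1\n"
    "MOCK_PROGRAM_RETURN: 0\n"
    "MOCK_PROGRAM_OUTPUT: This is the summary line\n"
    "\n"
    "This is the body of the commit msg\n"
)
with open(".mockprogram_inout.txt", "w") as fobj:
    fobj.write(block)

result = subprocess.run(
    [sys.executable, "mockprogram.py", "log", "-1"],
    capture_output=True, text=True)
print(result.returncode)  # 0, taken from the MOCK_PROGRAM_RETURN line
print(result.stdout)      # the MOCK_PROGRAM_OUTPUT lines, consumed from the file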
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('main', '0036_auto_20180516_1818')] operations = [migrations.AddField(model_name='promotion', name='image', field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'), preserve_default=False)] <|reserved_special_token_1|> from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [('main', '0036_auto_20180516_1818')] operations = [migrations.AddField(model_name='promotion', name='image', field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'), preserve_default=False)] <|reserved_special_token_1|> # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-05-16 12:24 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0036_auto_20180516_1818'), ] operations = [ migrations.AddField( model_name='promotion', name='image', field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'), preserve_default=False, ), ]
flexible
{ "blob_id": "a7add26a919a41e52ae41c6b4c4079eadaa8aa1d", "index": 851, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-16 12:24\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0036_auto_20180516_1818'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='promotion',\n name='image',\n field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'),\n preserve_default=False,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
''' extract package names from the Meteor guide and write them to packages-guide Uses the content folder of https://github.com/meteor/guide ''' from collections import defaultdict import os import sys import markdown from bs4 import BeautifulSoup def get_links_from_markdown(path, name): try: with open(path, 'r') as file: md = file.read() html = markdown.markdown(md) soup = BeautifulSoup(html, 'html.parser') return soup.find_all('a') except PermissionError: print('Could not open "%s"' % path) except UnicodeDecodeError: print('Could not proccess "%s"' % path) return [] def get_guide_packages(src_dir='content'): if len(sys.argv) > 1: src_dir = sys.argv[1] subjects = defaultdict(list) for entry in os.scandir(src_dir): name = entry.name[:-3] for link in get_links_from_markdown(entry.path, name): if len(link.text.split(':')) == 2: # packages only subjects[name].append(link.text) return subjects def write_packages(packages, path='packages-guide'): with open(path, 'w') as out: out.write('\n# packages from http://guide.meteor.com\n') for subject, links in packages.items(): out.write('\n# %s\n' % subject) for link in links: out.write('%s\n' % link) if __name__ == '__main__': GUIDE = get_guide_packages() write_packages(GUIDE)
normal
{ "blob_id": "274185896ab5c11256d69699df69fc2c0dde4f2d", "index": 987, "step-1": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n", "step-4": "<mask token>\nfrom collections import defaultdict\nimport os\nimport sys\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in 
os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n", "step-5": "''' extract package names from the Meteor guide and write them to packages-guide\n Uses the content folder of https://github.com/meteor/guide '''\n\nfrom collections import defaultdict\nimport os\nimport sys\n\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2: # packages only\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
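Editor's note: in the Meteor-guide extractor above, a link counts as a package reference when its text splits into exactly two pieces around a colon (the author:package naming scheme). A tiny standalone illustration of that check, using made-up link texts:

# Stand-alone illustration of the author:package filter used in
# get_guide_packages(); the strings below are invented examples.
link_texts = ['aldeed:collection2', 'mdg:validated-method',
              'Application structure', 'Blaze templates']
packages = [text for text in link_texts if len(text.split(':')) == 2]
print(packages)  # ['aldeed:collection2', 'mdg:validated-method']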
# -*- coding: utf-8 -*- import os import subprocess import virtualenv from templateserver import __version__ as version DEFAULT_TEMPLATE_DIR = 'templates' DEFAULT_MEDIA_DIR = 'media' DEFAULT_STATIC_DIR = 'static' DEFAULT_ENV_DIR = '.env' DEFAULT_RUNSERVER_PATH = 'runserver.py' RUNSERVER_TEMPLATE = os.path.abspath(os.path.join(os.path.dirname(__file__), 'runserver_template.py')) def install_virtualenv(envdir): if not os.path.exists(envdir): virtualenv.create_environment(envdir, False) def install_django(envdir, version): pip = os.path.join(envdir, 'bin', 'pip') subprocess.call([pip, 'install', 'django==%s' % version]) def install_runserver(envdir, runserverpath, templatedir, mediadir, staticdir): python = os.path.join(envdir, 'bin', 'python') with open(RUNSERVER_TEMPLATE) as fobj: template = fobj.read() runserver = template.replace( '$PYTHON$', python ).replace( '$MEDIADIR$', mediadir ).replace( '$STATICDIR$', staticdir ).replace( '$TEMPLATEDIR$', templatedir ).replace( '$VERSION$', version ) with open(runserverpath, 'w') as fobj: fobj.write(runserver) os.chmod(runserverpath, 0755) def install(templatedir=DEFAULT_TEMPLATE_DIR, mediadir=DEFAULT_MEDIA_DIR, staticdir=DEFAULT_STATIC_DIR, runserverpath=DEFAULT_RUNSERVER_PATH, envdir=DEFAULT_ENV_DIR, django='1.3'): """ Install the runserver.py script """ install_virtualenv(envdir) install_django(envdir, django) install_runserver(envdir, runserverpath, templatedir, mediadir, staticdir) def main(): import argparse def directory(s): path = os.path.abspath(s) if os.path.exists(path): return path raise argparse.ArgumentTypeError('directory %r does not exist' % path) parser = argparse.ArgumentParser() parser.add_argument('-d', '--django', dest='django', default='1.3', help='Django version to use.') parser.add_argument('-t', '--templatedir', help='Folder with your templates.', default=DEFAULT_TEMPLATE_DIR) parser.add_argument('-m', '--mediadir', help='Folder with your media files (css/js).', default=DEFAULT_MEDIA_DIR) parser.add_argument('-s', '--staticdir', help='Folder with your static files (css/js).', default=DEFAULT_STATIC_DIR) parser.add_argument('-r', '--runserverpath', help='Location for your runserver.py executable.', default=DEFAULT_RUNSERVER_PATH) args = parser.parse_args() install(django=args.django, templatedir=args.templatedir, mediadir=args.mediadir, staticdir=args.staticdir, runserverpath=args.runserverpath) print 'done' if __name__ == '__main__': main()
normal
{ "blob_id": "3f41cb1acbbb1a397ae1288bca1cbcd27c0d3f33", "index": 5143, "step-1": "# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nimport virtualenv\nfrom templateserver import __version__ as version\n\n\nDEFAULT_TEMPLATE_DIR = 'templates'\nDEFAULT_MEDIA_DIR = 'media'\nDEFAULT_STATIC_DIR = 'static'\nDEFAULT_ENV_DIR = '.env'\nDEFAULT_RUNSERVER_PATH = 'runserver.py'\n\nRUNSERVER_TEMPLATE = os.path.abspath(os.path.join(os.path.dirname(__file__), 'runserver_template.py'))\n\n\ndef install_virtualenv(envdir):\n if not os.path.exists(envdir):\n virtualenv.create_environment(envdir, False)\n\ndef install_django(envdir, version):\n pip = os.path.join(envdir, 'bin', 'pip')\n subprocess.call([pip, 'install', 'django==%s' % version])\n \ndef install_runserver(envdir, runserverpath, templatedir, mediadir, staticdir):\n python = os.path.join(envdir, 'bin', 'python')\n with open(RUNSERVER_TEMPLATE) as fobj:\n template = fobj.read()\n \n runserver = template.replace(\n '$PYTHON$', python\n ).replace(\n '$MEDIADIR$', mediadir\n ).replace(\n '$STATICDIR$', staticdir\n ).replace(\n '$TEMPLATEDIR$', templatedir\n ).replace(\n '$VERSION$', version\n )\n with open(runserverpath, 'w') as fobj:\n fobj.write(runserver)\n os.chmod(runserverpath, 0755)\n\ndef install(templatedir=DEFAULT_TEMPLATE_DIR, mediadir=DEFAULT_MEDIA_DIR,\n staticdir=DEFAULT_STATIC_DIR, runserverpath=DEFAULT_RUNSERVER_PATH,\n envdir=DEFAULT_ENV_DIR, django='1.3'):\n \"\"\"\n Install the runserver.py script\n \"\"\"\n install_virtualenv(envdir)\n install_django(envdir, django)\n install_runserver(envdir, runserverpath, templatedir, mediadir, staticdir)\n\n\ndef main():\n import argparse\n def directory(s):\n path = os.path.abspath(s)\n if os.path.exists(path):\n return path\n raise argparse.ArgumentTypeError('directory %r does not exist' % path)\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--django', dest='django', default='1.3',\n help='Django version to use.')\n parser.add_argument('-t', '--templatedir', help='Folder with your templates.',\n default=DEFAULT_TEMPLATE_DIR)\n parser.add_argument('-m', '--mediadir', help='Folder with your media files (css/js).',\n default=DEFAULT_MEDIA_DIR)\n parser.add_argument('-s', '--staticdir', help='Folder with your static files (css/js).',\n default=DEFAULT_STATIC_DIR)\n parser.add_argument('-r', '--runserverpath', help='Location for your runserver.py executable.',\n default=DEFAULT_RUNSERVER_PATH)\n args = parser.parse_args()\n install(django=args.django, templatedir=args.templatedir,\n mediadir=args.mediadir, staticdir=args.staticdir,\n runserverpath=args.runserverpath)\n print 'done'\n\nif __name__ == '__main__':\n main()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
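Editor's note: the heart of the installer above is install_runserver(), which rewrites a template by substituting the $PYTHON$ / $MEDIADIR$ / $STATICDIR$ / $TEMPLATEDIR$ / $VERSION$ placeholders. The snippet below reproduces that substitution in isolation; the template text here is invented — the real one ships as runserver_template.py next to the module.

# Isolated sketch of the placeholder substitution performed by
# install_runserver(); the template string is a made-up stand-in for
# runserver_template.py.
template = (
    "#!$PYTHON$\n"
    "MEDIA_DIR = '$MEDIADIR$'\n"
    "STATIC_DIR = '$STATICDIR$'\n"
    "TEMPLATE_DIR = '$TEMPLATEDIR$'\n"
    "VERSION = '$VERSION$'\n"
)
runserver = (template
             .replace('$PYTHON$', '.env/bin/python')
             .replace('$MEDIADIR$', 'media')
             .replace('$STATICDIR$', 'static')
             .replace('$TEMPLATEDIR$', 'templates')
             .replace('$VERSION$', '0.1'))
print(runserver)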
<|reserved_special_token_0|> class Card: def check_cat(self, string): if 'Cat' in string: return True return False <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def steal(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_stolen = arr_players[recipient].hand.pop(loops.card_stealing( arr_players, recipient)) print('You stole', card_stolen.type) hand.remove(played_card) player.hand.append(card_stolen) return True, False def skip(self, attack, pick): print('Your turn has been skipped') pick = False return pick, attack <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Card: def check_cat(self, string): if 'Cat' in string: return True return False <|reserved_special_token_0|> def __str__(self): return self.type <|reserved_special_token_0|> def favor(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_taken = arr_players[recipient].hand.pop(loops.give_card( arr_players, recipient)) print(card_taken, 'was given') recipient.hand.remove(card_taken) player.hand.append(card_taken) return True, False def steal(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_stolen = arr_players[recipient].hand.pop(loops.card_stealing( arr_players, recipient)) print('You stole', card_stolen.type) hand.remove(played_card) player.hand.append(card_stolen) return True, False def skip(self, attack, pick): print('Your turn has been skipped') pick = False return pick, attack <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Card: def check_cat(self, string): if 'Cat' in string: return True return False def __init__(self, string): self.type = string self.cat = self.check_cat(self.type) def __str__(self): return self.type def nope(self, arr_players, cards, turn_order): count = 0 for i, k in enumerate(arr_players): if i != turn_order: for i, k in enumerate(k.hand): if k == cards[11]: count += 1 if count > 0: print('A nope card can be played') decision = input('Would a player like to play a nope card? (y/n)') while decision != 'y' and decision != 'n': decision = input( 'Would a player like to play a nope card? 
(y/n) ') if decision == 'n': return False elif decision == 'y': for i, k in enumerate(arr_players): print(str(i) + '-' + k.name) player = int(input( 'Which player would like to play the nope card?')) while (player < 0 or player > len(arr_players) ) and players == turn_order: player = int * input( 'Which player would like to play the nope card?') arr_players[player].hand.remove(cards[11]) return True return False def favor(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_taken = arr_players[recipient].hand.pop(loops.give_card( arr_players, recipient)) print(card_taken, 'was given') recipient.hand.remove(card_taken) player.hand.append(card_taken) return True, False def steal(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_stolen = arr_players[recipient].hand.pop(loops.card_stealing( arr_players, recipient)) print('You stole', card_stolen.type) hand.remove(played_card) player.hand.append(card_stolen) return True, False def skip(self, attack, pick): print('Your turn has been skipped') pick = False return pick, attack <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Card: def check_cat(self, string): if 'Cat' in string: return True return False def __init__(self, string): self.type = string self.cat = self.check_cat(self.type) def __str__(self): return self.type def nope(self, arr_players, cards, turn_order): count = 0 for i, k in enumerate(arr_players): if i != turn_order: for i, k in enumerate(k.hand): if k == cards[11]: count += 1 if count > 0: print('A nope card can be played') decision = input('Would a player like to play a nope card? (y/n)') while decision != 'y' and decision != 'n': decision = input( 'Would a player like to play a nope card? 
(y/n) ') if decision == 'n': return False elif decision == 'y': for i, k in enumerate(arr_players): print(str(i) + '-' + k.name) player = int(input( 'Which player would like to play the nope card?')) while (player < 0 or player > len(arr_players) ) and players == turn_order: player = int * input( 'Which player would like to play the nope card?') arr_players[player].hand.remove(cards[11]) return True return False def favor(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_taken = arr_players[recipient].hand.pop(loops.give_card( arr_players, recipient)) print(card_taken, 'was given') recipient.hand.remove(card_taken) player.hand.append(card_taken) return True, False def steal(self, hand, player, arr_players, played_card): recipient = loops.phase_of_taking(arr_players, player) card_stolen = arr_players[recipient].hand.pop(loops.card_stealing( arr_players, recipient)) print('You stole', card_stolen.type) hand.remove(played_card) player.hand.append(card_stolen) return True, False def skip(self, attack, pick): print('Your turn has been skipped') pick = False return pick, attack def attack(self, attack, pick): attack = True pick = False return pick, attack def see_future(self, decker): if decker.cards_left() < 3: for i in range(decker.cards_left()): card = decker.draw_top(i) print(card.type) decker.add_card(card, i) else: for i in range(3): card = decker.draw_top(i) print(card.type) decker.add_card(card, i) <|reserved_special_token_1|> import loops class Card(): #to make a card you must type Card("Name of Card") def check_cat(self,string): if "Cat" in string: return True return False def __init__(self,string): self.type = string self.cat = self.check_cat(self.type) # self.image_back = image_back # self.image_front = image_front def __str__(self): return self.type #negates any action, except a defuse def nope(self,arr_players,cards,turn_order): count = 0 for i,k in enumerate(arr_players): if i != turn_order: for i,k in enumerate(k.hand): if k == cards[11]: count += 1 if count > 0: print("A nope card can be played") decision = input("Would a player like to play a nope card? (y/n)") while decision != "y" and decision != "n": decision = input("Would a player like to play a nope card? 
(y/n) ") if decision == "n": return False elif decision == 'y': for i,k in enumerate(arr_players): print(str(i)+"-"+k.name) player = int(input("Which player would like to play the nope card?")) while (player < 0 or player > len(arr_players)) and players == turn_order: player = int*input("Which player would like to play the nope card?") arr_players[player].hand.remove(cards[11]) return True return False #makes another player choose a card to give away to current player def favor(self,hand,player,arr_players,played_card): recipient = loops.phase_of_taking(arr_players,player) card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient)) print(card_taken,"was given") recipient.hand.remove(card_taken) player.hand.append(card_taken) return True,False #allows a player to steal a card from another player def steal(self,hand,player,arr_players,played_card): recipient = loops.phase_of_taking(arr_players,player) card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient)) print("You stole",card_stolen.type) hand.remove(played_card) player.hand.append(card_stolen) return True,False #makes the player skip a turn def skip(self,attack,pick): print("Your turn has been skipped") pick = False return pick,attack #the player makes the next person take his turn as well, forcing them to take 2 turns def attack(self,attack,pick): attack = True pick = False return pick,attack #see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions def see_future(self,decker): if decker.cards_left() < 3: for i in range(decker.cards_left()): card = decker.draw_top(i) print(card.type) decker.add_card(card,i) else: for i in range(3): card = decker.draw_top(i) print(card.type) decker.add_card(card,i)
flexible
{ "blob_id": "3b71ef6c3681b8c5e6aadf2d125c35cbf3a12661", "index": 6248, "step-1": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n\n def __str__(self):\n return self.type\n <mask token>\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? 
(y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n", "step-4": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? 
(y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n\n def attack(self, attack, pick):\n attack = True\n pick = False\n return pick, attack\n\n def see_future(self, decker):\n if decker.cards_left() < 3:\n for i in range(decker.cards_left()):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n else:\n for i in range(3):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n", "step-5": "import loops\r\n\r\nclass Card():\r\n #to make a card you must type Card(\"Name of Card\")\r\n def check_cat(self,string):\r\n if \"Cat\" in string:\r\n return True\r\n return False\r\n def __init__(self,string):\r\n self.type = string\r\n self.cat = self.check_cat(self.type)\r\n # self.image_back = image_back\r\n # self.image_front = image_front\r\n def __str__(self):\r\n return self.type\r\n #negates any action, except a defuse\r\n def nope(self,arr_players,cards,turn_order):\r\n count = 0\r\n for i,k in enumerate(arr_players):\r\n if i != turn_order:\r\n for i,k in enumerate(k.hand):\r\n if k == cards[11]:\r\n count += 1\r\n if count > 0:\r\n print(\"A nope card can be played\")\r\n decision = input(\"Would a player like to play a nope card? (y/n)\")\r\n while decision != \"y\" and decision != \"n\":\r\n decision = input(\"Would a player like to play a nope card? 
(y/n) \")\r\n if decision == \"n\":\r\n return False\r\n elif decision == 'y':\r\n for i,k in enumerate(arr_players):\r\n print(str(i)+\"-\"+k.name)\r\n player = int(input(\"Which player would like to play the nope card?\"))\r\n while (player < 0 or player > len(arr_players)) and players == turn_order:\r\n player = int*input(\"Which player would like to play the nope card?\")\r\n arr_players[player].hand.remove(cards[11])\r\n return True\r\n return False\r\n\r\n #makes another player choose a card to give away to current player\r\n def favor(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))\r\n print(card_taken,\"was given\")\r\n recipient.hand.remove(card_taken)\r\n player.hand.append(card_taken)\r\n return True,False\r\n #allows a player to steal a card from another player\r\n def steal(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))\r\n print(\"You stole\",card_stolen.type)\r\n hand.remove(played_card)\r\n player.hand.append(card_stolen)\r\n return True,False\r\n #makes the player skip a turn\r\n def skip(self,attack,pick):\r\n print(\"Your turn has been skipped\")\r\n pick = False\r\n return pick,attack\r\n #the player makes the next person take his turn as well, forcing them to take 2 turns\r\n def attack(self,attack,pick):\r\n attack = True\r\n pick = False\r\n return pick,attack\r\n #see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions\r\n def see_future(self,decker):\r\n if decker.cards_left() < 3:\r\n for i in range(decker.cards_left()):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i)\r\n else:\r\n for i in range(3):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i) ", "step-ids": [ 4, 6, 8, 10, 12 ] }
[ 4, 6, 8, 10, 12 ]
from django.contrib import admin from .models import Invite class InviteAdmin(admin.ModelAdmin): list_display = ('invitee', 'inviter', 'created_on', 'approved', 'rejected', 'used') admin.site.register(Invite, InviteAdmin)
normal
{ "blob_id": "fcb13b087b9c967ab16b64885411cc4aae98583c", "index": 2130, "step-1": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\nadmin.site.register(Invite, InviteAdmin)\n", "step-4": "from django.contrib import admin\nfrom .models import Invite\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\nadmin.site.register(Invite, InviteAdmin)\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Item(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def save(self, *args, **kwargs): if self.price is None: self.price = self.original_price * self.markup_percentage / 100 super(Item, self).save(*args, **kwargs) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Item(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def save(self, *args, **kwargs): if self.price is None: self.price = self.original_price * self.markup_percentage / 100 super(Item, self).save(*args, **kwargs) def __str__(self): if self.discount_percentage == 0: return self.name + ' - ' + str(self.price) + '€' else: return self.name + ' - ' + str(self.price * ((100 - self. discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self. discount_percentage) + '%' <|reserved_special_token_1|> <|reserved_special_token_0|> class Item(models.Model): name = models.CharField(max_length=999, unique=True) description = models.TextField(blank=True) random_str = models.CharField(max_length=999, default=id_generator) original_price = models.FloatField() markup_percentage = models.PositiveIntegerField(default=120) price = models.FloatField(blank=True) discount_percentage = models.PositiveIntegerField(default=0) img = models.ImageField() img_2 = models.ImageField(null=True, blank=True) img_3 = models.ImageField(null=True, blank=True) img_4 = models.ImageField(null=True, blank=True) def save(self, *args, **kwargs): if self.price is None: self.price = self.original_price * self.markup_percentage / 100 super(Item, self).save(*args, **kwargs) def __str__(self): if self.discount_percentage == 0: return self.name + ' - ' + str(self.price) + '€' else: return self.name + ' - ' + str(self.price * ((100 - self. discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self. 
discount_percentage) + '%' <|reserved_special_token_1|> from django.db import models import string import random def id_generator(size=32, chars=string.ascii_uppercase + string.digits): exists = True while exists == True: ran = ''.join(random.choice(chars) for _ in range(size)) if len(Item.objects.filter(random_str=ran)) == 0: exists = False return ran class Item(models.Model): name = models.CharField(max_length=999, unique=True) description = models.TextField(blank=True) random_str = models.CharField(max_length=999, default=id_generator) original_price = models.FloatField() markup_percentage = models.PositiveIntegerField(default=120) price = models.FloatField(blank=True) discount_percentage = models.PositiveIntegerField(default=0) img = models.ImageField() img_2 = models.ImageField(null=True, blank=True) img_3 = models.ImageField(null=True, blank=True) img_4 = models.ImageField(null=True, blank=True) def save(self, *args, **kwargs): if self.price is None: self.price = self.original_price * self.markup_percentage / 100 super(Item, self).save(*args, **kwargs) def __str__(self): if self.discount_percentage == 0: return self.name + ' - ' + str(self.price) + '€' else: return self.name + ' - ' + str(self.price * ((100 - self. discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self. discount_percentage) + '%' <|reserved_special_token_1|> from django.db import models import string import random def id_generator(size=32, chars=string.ascii_uppercase + string.digits): exists = True while exists == True: ran = ''.join(random.choice(chars) for _ in range(size)) if len(Item.objects.filter(random_str=ran)) == 0: exists = False return ran # Create your models here. class Item(models.Model): name = models.CharField(max_length=999, unique=True) description = models.TextField(blank=True) random_str = models.CharField(max_length=999, default=id_generator) original_price = models.FloatField() markup_percentage = models.PositiveIntegerField(default=120) price = models.FloatField(blank=True) discount_percentage = models.PositiveIntegerField(default=0) #TODO suurused img = models.ImageField() img_2 = models.ImageField(null=True, blank=True) img_3 = models.ImageField(null=True, blank=True) img_4 = models.ImageField(null=True, blank=True) def save(self, *args, **kwargs): if self.price is None: self.price = self.original_price * self.markup_percentage / 100 super(Item, self).save(*args, **kwargs) def __str__(self): if self.discount_percentage == 0: return self.name + " - " + str(self.price) + "€" else: return self.name + " - " + str( self.price*((100-self.discount_percentage)/100) ) + "€ - DISCOUNT " + str(self.discount_percentage) + "%"
flexible
{ "blob_id": "efba815fe64cddb5315b17b2cbaf1d3fc38c11ee", "index": 4995, "step-1": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n", "step-3": "<mask token>\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n", "step-4": "from django.db import models\nimport string\nimport random\n\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n exists = True\n while exists == True:\n ran = ''.join(random.choice(chars) for _ in range(size))\n if len(Item.objects.filter(random_str=ran)) == 0:\n exists = False\n return ran\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n", 
"step-5": "from django.db import models\nimport string\nimport random\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n\texists = True\n\twhile exists == True:\n\t\tran = ''.join(random.choice(chars) for _ in range(size))\n\t\tif len(Item.objects.filter(random_str=ran)) == 0:\n\t\t\texists = False\n\n\treturn ran\n\n\n\n# Create your models here.\nclass Item(models.Model):\n\tname = models.CharField(max_length=999, unique=True)\n\tdescription = models.TextField(blank=True)\n\trandom_str = models.CharField(max_length=999, default=id_generator)\n\n\toriginal_price = models.FloatField()\n\tmarkup_percentage = models.PositiveIntegerField(default=120)\n\tprice = models.FloatField(blank=True) \n\tdiscount_percentage = models.PositiveIntegerField(default=0)\n\n#TODO suurused\n\n\n\timg = models.ImageField()\n\timg_2 = models.ImageField(null=True, blank=True)\n\timg_3 = models.ImageField(null=True, blank=True)\n\timg_4 = models.ImageField(null=True, blank=True)\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.price is None:\n\t\t\tself.price = self.original_price * self.markup_percentage / 100\n\t\tsuper(Item, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\tif self.discount_percentage == 0:\n\t\t\treturn self.name + \" - \" + str(self.price) + \"€\"\n\t\telse:\n\t\t\treturn self.name + \" - \" + str( self.price*((100-self.discount_percentage)/100) ) + \"€ - DISCOUNT \" + str(self.discount_percentage) + \"%\"", "step-ids": [ 2, 3, 4, 6, 7 ] }
[ 2, 3, 4, 6, 7 ]
<|reserved_special_token_0|> def cast_types(args): args.epochs = int(args.epochs) args.batch_size = int(args.batch_size) args.input_shape = args.input_shape.split(' ') for num in args.input_shape: if num != '': num = int(num) args.input_shape = tuple(args.input_shape) return args def init_model(input_shape): model = keras.Sequential() model.add(keras.layers.Embedding(input_shape[0], 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[ 'accuracy']) return model <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def cast_types(args): args.epochs = int(args.epochs) args.batch_size = int(args.batch_size) args.input_shape = args.input_shape.split(' ') for num in args.input_shape: if num != '': num = int(num) args.input_shape = tuple(args.input_shape) return args def init_model(input_shape): model = keras.Sequential() model.add(keras.layers.Embedding(input_shape[0], 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[ 'accuracy']) return model def main(args): args = cast_types(args) df = pd.read_csv(args.data) X, y = df.iloc[:, :-1], df.iloc[:, -1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = init_model(X.shape) train_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2) test_metrics = model.evaluate(X_test, y_test) test_loss = float(test_metrics[0]) test_acc = float(test_metrics[1]) exp = Experiment() exp.log_param('test_loss', test_loss) exp.log_param('test_acc', test_acc) model.save('model.h5') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def cast_types(args): args.epochs = int(args.epochs) args.batch_size = int(args.batch_size) args.input_shape = args.input_shape.split(' ') for num in args.input_shape: if num != '': num = int(num) args.input_shape = tuple(args.input_shape) return args def init_model(input_shape): model = keras.Sequential() model.add(keras.layers.Embedding(input_shape[0], 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[ 'accuracy']) return model def main(args): args = cast_types(args) df = pd.read_csv(args.data) X, y = df.iloc[:, :-1], df.iloc[:, -1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = init_model(X.shape) train_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2) test_metrics = model.evaluate(X_test, y_test) test_loss = float(test_metrics[0]) test_acc = float(test_metrics[1]) exp = Experiment() exp.log_param('test_loss', test_loss) exp.log_param('test_acc', test_acc) model.save('model.h5') if __name__ == '__main__': parser = argparse.ArgumentParser(description='RNN Classifier') parser.add_argument('--data', action='store', dest='data', required= True, help= 'String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. 
' ) parser.add_argument('--project_dir', action='store', dest='project_dir', help='String.') parser.add_argument('--output_dir', action='store', dest='output_dir', help='String.') parser.add_argument('--input_shape', action='store', dest='input_shape', default='10000', help='The shape of the input. Look like: a b c.') parser.add_argument('--epochs', action='store', default='10', dest= 'epochs', help='Number of epochs when training.') parser.add_argument('--batch_size', action='store', default='64', dest= 'batch_size', help='batch size when training.') args = parser.parse_args() main(args) <|reserved_special_token_1|> <|reserved_special_token_0|> import argparse import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from cnvrg import Experiment from sklearn.model_selection import train_test_split def cast_types(args): args.epochs = int(args.epochs) args.batch_size = int(args.batch_size) args.input_shape = args.input_shape.split(' ') for num in args.input_shape: if num != '': num = int(num) args.input_shape = tuple(args.input_shape) return args def init_model(input_shape): model = keras.Sequential() model.add(keras.layers.Embedding(input_shape[0], 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[ 'accuracy']) return model def main(args): args = cast_types(args) df = pd.read_csv(args.data) X, y = df.iloc[:, :-1], df.iloc[:, -1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = init_model(X.shape) train_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2) test_metrics = model.evaluate(X_test, y_test) test_loss = float(test_metrics[0]) test_acc = float(test_metrics[1]) exp = Experiment() exp.log_param('test_loss', test_loss) exp.log_param('test_acc', test_acc) model.save('model.h5') if __name__ == '__main__': parser = argparse.ArgumentParser(description='RNN Classifier') parser.add_argument('--data', action='store', dest='data', required= True, help= 'String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. ' ) parser.add_argument('--project_dir', action='store', dest='project_dir', help='String.') parser.add_argument('--output_dir', action='store', dest='output_dir', help='String.') parser.add_argument('--input_shape', action='store', dest='input_shape', default='10000', help='The shape of the input. Look like: a b c.') parser.add_argument('--epochs', action='store', default='10', dest= 'epochs', help='Number of epochs when training.') parser.add_argument('--batch_size', action='store', default='64', dest= 'batch_size', help='batch size when training.') args = parser.parse_args() main(args) <|reserved_special_token_1|> """ All rights reserved to cnvrg.io http://www.cnvrg.io cnvrg.io - Projects Example last update: Nov 07, 2019. ------------- rnn.py ============================================================================== """ import argparse import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from cnvrg import Experiment from sklearn.model_selection import train_test_split def cast_types(args): # epochs. args.epochs = int(args.epochs) # batch_size. args.batch_size = int(args.batch_size) # input_shape. 
args.input_shape = args.input_shape.split(' ') for num in args.input_shape: if num != '': num = int(num) args.input_shape = tuple(args.input_shape) # ----- # return args def init_model(input_shape): model = keras.Sequential() model.add(keras.layers.Embedding(input_shape[0], 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model def main(args): args = cast_types(args) df = pd.read_csv(args.data) X, y = df.iloc[:, :-1], df.iloc[:, -1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = init_model(X.shape) # <--- Doesn't work with the shape. train_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2) test_metrics = model.evaluate(X_test, y_test) # train_loss = list(np.round(train_metrics.history['loss'], 3)) # train_acc = list(np.round(train_metrics.history['accuracy'], 3)) # val_loss = list(np.round(train_metrics.history['val_loss'], 3)) # val_acc = list(np.round(train_metrics.history['val_accuracy'], 3)) test_loss = float(test_metrics[0]) test_acc = float(test_metrics[1]) exp = Experiment() exp.log_param("test_loss", test_loss) exp.log_param("test_acc", test_acc) model.save("model.h5") if __name__ == '__main__': parser = argparse.ArgumentParser(description="""RNN Classifier""") parser.add_argument('--data', action='store', dest='data', required=True, help="""String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. """) parser.add_argument('--project_dir', action='store', dest='project_dir', help="""String.""") parser.add_argument('--output_dir', action='store', dest='output_dir', help="""String.""") parser.add_argument('--input_shape', action='store', dest='input_shape', default="10000", help="""The shape of the input. Look like: a b c.""") parser.add_argument('--epochs', action='store', default="10", dest='epochs', help="Number of epochs when training.") parser.add_argument('--batch_size', action='store', default="64", dest='batch_size', help="batch size when training.") args = parser.parse_args() main(args)
flexible
{ "blob_id": "fbac2d66f4d69a52c3df5d665b622659e4d8dacd", "index": 5733, "step-1": "<mask token>\n\n\ndef cast_types(args):\n args.epochs = int(args.epochs)\n args.batch_size = int(args.batch_size)\n args.input_shape = args.input_shape.split(' ')\n for num in args.input_shape:\n if num != '':\n num = int(num)\n args.input_shape = tuple(args.input_shape)\n return args\n\n\ndef init_model(input_shape):\n model = keras.Sequential()\n model.add(keras.layers.Embedding(input_shape[0], 16))\n model.add(keras.layers.GlobalAveragePooling1D())\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef cast_types(args):\n args.epochs = int(args.epochs)\n args.batch_size = int(args.batch_size)\n args.input_shape = args.input_shape.split(' ')\n for num in args.input_shape:\n if num != '':\n num = int(num)\n args.input_shape = tuple(args.input_shape)\n return args\n\n\ndef init_model(input_shape):\n model = keras.Sequential()\n model.add(keras.layers.Embedding(input_shape[0], 16))\n model.add(keras.layers.GlobalAveragePooling1D())\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\ndef main(args):\n args = cast_types(args)\n df = pd.read_csv(args.data)\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n model = init_model(X.shape)\n train_metrics = model.fit(X_train, y_train, epochs=args.epochs,\n batch_size=args.batch_size, validation_split=0.2)\n test_metrics = model.evaluate(X_test, y_test)\n test_loss = float(test_metrics[0])\n test_acc = float(test_metrics[1])\n exp = Experiment()\n exp.log_param('test_loss', test_loss)\n exp.log_param('test_acc', test_acc)\n model.save('model.h5')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef cast_types(args):\n args.epochs = int(args.epochs)\n args.batch_size = int(args.batch_size)\n args.input_shape = args.input_shape.split(' ')\n for num in args.input_shape:\n if num != '':\n num = int(num)\n args.input_shape = tuple(args.input_shape)\n return args\n\n\ndef init_model(input_shape):\n model = keras.Sequential()\n model.add(keras.layers.Embedding(input_shape[0], 16))\n model.add(keras.layers.GlobalAveragePooling1D())\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\ndef main(args):\n args = cast_types(args)\n df = pd.read_csv(args.data)\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n model = init_model(X.shape)\n train_metrics = model.fit(X_train, y_train, epochs=args.epochs,\n batch_size=args.batch_size, validation_split=0.2)\n test_metrics = model.evaluate(X_test, y_test)\n test_loss = float(test_metrics[0])\n test_acc = float(test_metrics[1])\n exp = Experiment()\n exp.log_param('test_loss', test_loss)\n exp.log_param('test_acc', test_acc)\n model.save('model.h5')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='RNN Classifier')\n parser.add_argument('--data', action='store', dest='data', required=\n True, help=\n 'String. 
path to csv file: The data set for the classifier. Assumes the last column includes the labels. '\n )\n parser.add_argument('--project_dir', action='store', dest='project_dir',\n help='String.')\n parser.add_argument('--output_dir', action='store', dest='output_dir',\n help='String.')\n parser.add_argument('--input_shape', action='store', dest='input_shape',\n default='10000', help='The shape of the input. Look like: a b c.')\n parser.add_argument('--epochs', action='store', default='10', dest=\n 'epochs', help='Number of epochs when training.')\n parser.add_argument('--batch_size', action='store', default='64', dest=\n 'batch_size', help='batch size when training.')\n args = parser.parse_args()\n main(args)\n", "step-4": "<mask token>\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom cnvrg import Experiment\nfrom sklearn.model_selection import train_test_split\n\n\ndef cast_types(args):\n args.epochs = int(args.epochs)\n args.batch_size = int(args.batch_size)\n args.input_shape = args.input_shape.split(' ')\n for num in args.input_shape:\n if num != '':\n num = int(num)\n args.input_shape = tuple(args.input_shape)\n return args\n\n\ndef init_model(input_shape):\n model = keras.Sequential()\n model.add(keras.layers.Embedding(input_shape[0], 16))\n model.add(keras.layers.GlobalAveragePooling1D())\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\ndef main(args):\n args = cast_types(args)\n df = pd.read_csv(args.data)\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n model = init_model(X.shape)\n train_metrics = model.fit(X_train, y_train, epochs=args.epochs,\n batch_size=args.batch_size, validation_split=0.2)\n test_metrics = model.evaluate(X_test, y_test)\n test_loss = float(test_metrics[0])\n test_acc = float(test_metrics[1])\n exp = Experiment()\n exp.log_param('test_loss', test_loss)\n exp.log_param('test_acc', test_acc)\n model.save('model.h5')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='RNN Classifier')\n parser.add_argument('--data', action='store', dest='data', required=\n True, help=\n 'String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. '\n )\n parser.add_argument('--project_dir', action='store', dest='project_dir',\n help='String.')\n parser.add_argument('--output_dir', action='store', dest='output_dir',\n help='String.')\n parser.add_argument('--input_shape', action='store', dest='input_shape',\n default='10000', help='The shape of the input. 
Look like: a b c.')\n parser.add_argument('--epochs', action='store', default='10', dest=\n 'epochs', help='Number of epochs when training.')\n parser.add_argument('--batch_size', action='store', default='64', dest=\n 'batch_size', help='batch size when training.')\n args = parser.parse_args()\n main(args)\n", "step-5": "\"\"\"\nAll rights reserved to cnvrg.io\n http://www.cnvrg.io\n\ncnvrg.io - Projects Example\n\nlast update: Nov 07, 2019.\n-------------\nrnn.py\n==============================================================================\n\"\"\"\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom cnvrg import Experiment\nfrom sklearn.model_selection import train_test_split\n\n\ndef cast_types(args):\n\t# epochs.\n\targs.epochs = int(args.epochs)\n\n\t# batch_size.\n\targs.batch_size = int(args.batch_size)\n\n\t# input_shape.\n\targs.input_shape = args.input_shape.split(' ')\n\tfor num in args.input_shape:\n\t\tif num != '':\n\t\t\tnum = int(num)\n\targs.input_shape = tuple(args.input_shape)\n\n\t# ----- #\n\treturn args\n\n\ndef init_model(input_shape):\n\n\tmodel = keras.Sequential()\n\tmodel.add(keras.layers.Embedding(input_shape[0], 16))\n\tmodel.add(keras.layers.GlobalAveragePooling1D())\n\tmodel.add(keras.layers.Dense(16, activation=tf.nn.relu))\n\tmodel.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\treturn model\n\n\ndef main(args):\n\targs = cast_types(args)\n\n\tdf = pd.read_csv(args.data)\n\tX, y = df.iloc[:, :-1], df.iloc[:, -1]\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n\tmodel = init_model(X.shape) # <--- Doesn't work with the shape.\n\n\ttrain_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2)\n\n\ttest_metrics = model.evaluate(X_test, y_test)\n\n\t# train_loss = list(np.round(train_metrics.history['loss'], 3))\n\t# train_acc = list(np.round(train_metrics.history['accuracy'], 3))\n\t# val_loss = list(np.round(train_metrics.history['val_loss'], 3))\n\t# val_acc = list(np.round(train_metrics.history['val_accuracy'], 3))\n\ttest_loss = float(test_metrics[0])\n\ttest_acc = float(test_metrics[1])\n\n\texp = Experiment()\n\texp.log_param(\"test_loss\", test_loss)\n\texp.log_param(\"test_acc\", test_acc)\n\n\tmodel.save(\"model.h5\")\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description=\"\"\"RNN Classifier\"\"\")\n\n\tparser.add_argument('--data', action='store', dest='data', required=True, help=\"\"\"String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. \"\"\")\n\n\tparser.add_argument('--project_dir', action='store', dest='project_dir', help=\"\"\"String.\"\"\")\n\n\tparser.add_argument('--output_dir', action='store', dest='output_dir', help=\"\"\"String.\"\"\")\n\n\tparser.add_argument('--input_shape', action='store', dest='input_shape', default=\"10000\", help=\"\"\"The shape of the input. Look like: a b c.\"\"\")\n\n\tparser.add_argument('--epochs', action='store', default=\"10\", dest='epochs', help=\"Number of epochs when training.\")\n\n\tparser.add_argument('--batch_size', action='store', default=\"64\", dest='batch_size', help=\"batch size when training.\")\n\n\targs = parser.parse_args()\n\n\tmain(args)\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from keras.preprocessing.text import text_to_word_sequence from keras.models import Sequential from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding from keras.layers.recurrent import LSTM from keras.optimizers import Adam, RMSprop #from nltk import FreqDist import numpy as np import os import datetime import re def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size): ''' fin = open(test_source, "r") data2 = fin.read() fin.close() fout = open(train_source, "a") fout.write(data2) fout.close() fin = open(test_dist, "r") data2 = fin.read() fin.close() fout = open(train_dist, "a") fout.write(data2) fout.close() ''' # Reading raw text from source and destination files f = open(train_source, 'r') X_data = f.read() f.close() f = open(train_dist, 'r') y_data = f.read() f.close() # Splitting raw text into array of sequences X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len] y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len] #Check or Create Vocab vocab_files = [f for f in os.listdir('.') if 'vocab' in f] x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+') y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+') if len(vocab_files) == 0: vocab_x = {} for line in X: for token in line: if not token in vocab_x: vocab_x[token] = 0 vocab_x[token] += 1 X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True) X_vocab = X_vocab[0:(vocab_size)] for (i, item) in enumerate(X_vocab): if item == "newlinechar": X_vocab[i] = "-" for item in X_vocab: print>>x_vocab_file, item x_vocab_file.close() vocab_y = {} for line in y: for token in line: if not token in vocab_y: vocab_y[token] = 0 vocab_y[token] += 1 y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True) y_vocab = y_vocab[0:(vocab_size)] for (i, item) in enumerate(y_vocab): if item == "newlinechar": y_vocab[i] = "-" for item in y_vocab: print>>y_vocab_file, item y_vocab_file.close() else: X_vocab = x_vocab_file.read().splitlines() y_vocab = y_vocab_file.read().splitlines() # Creating the vocabulary set with the most common words #dist = FreqDist(np.hstack(X)) #X_vocab = dist.most_common(vocab_size-1) #dist = FreqDist(np.hstack(y)) #y_vocab = dist.most_common(vocab_size-1) # Creating an array of words from the vocabulary set, we will use this array as index-to-word dictionary X_ix_to_word = X_vocab # Adding the word "ZERO" to the beginning of the array X_ix_to_word.insert(0, 'ZERO') # Adding the word 'UNK' to the end of the array (stands for UNKNOWN words) X_ix_to_word.append('UNK') # Creating the word-to-index dictionary from the array created above #X_word_to_ix = {word:ix for ix, word in enumerate(X_ix_to_word)} X_word_to_ix = dict((map(reversed, enumerate(X_ix_to_word)))) # Converting each word to its index value for i, sentence in enumerate(X): for j, word in enumerate(sentence): if word in X_word_to_ix: X[i][j] = X_word_to_ix[word] else: X[i][j] = X_word_to_ix['UNK'] y_ix_to_word = y_vocab y_ix_to_word.insert(0, 'ZERO') y_ix_to_word.append('UNK') #y_word_to_ix = {word:ix for ix, word in enumerate(y_ix_to_word)} y_word_to_ix = dict((map(reversed, enumerate(y_ix_to_word)))) for i, sentence in enumerate(y): for j, word in enumerate(sentence): if word in y_word_to_ix: y[i][j] = y_word_to_ix[word] else: y[i][j] = y_word_to_ix['UNK'] return (X, len(X_vocab), 
X_word_to_ix, X_ix_to_word, y, len(y_vocab), y_word_to_ix, y_ix_to_word) def load_test_data(source, X_word_to_ix, max_len): f = open(source, 'r') X_data = f.read() f.close() X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\n') if len(x) > 0 and len(x) <= max_len] for i, sentence in enumerate(X): for j, word in enumerate(sentence): if word in X_word_to_ix: X[i][j] = X_word_to_ix[word] else: X[i][j] = X_word_to_ix['UNK'] return X def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers): model = Sequential() # Creating encoder network model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True)) model.add(LSTM(hidden_size)) model.add(RepeatVector(y_max_len)) # Creating decoder network for _ in range(num_layers): model.add(LSTM(hidden_size, return_sequences=True)) model.add(TimeDistributed(Dense(y_vocab_len))) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) return model def process_data(word_sentences, max_len, word_to_ix): # Vectorizing each element in each sequence sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix))) for i, sentence in enumerate(word_sentences): for j, word in enumerate(sentence): sequences[i, j, word] = 1. return sequences def find_checkpoint_file(folder): checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f] if len(checkpoint_file) == 0: return [] modified_time = [os.path.getmtime(f) for f in checkpoint_file] return checkpoint_file[np.argmax(modified_time)]
normal
{ "blob_id": "2962ef1d7ecd4e8d472b9dc36664e4e8745391fd", "index": 3616, "step-1": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\n<mask token>\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, 
max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef 
process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n 
mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\ndef find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n", "step-4": "from keras.preprocessing.text import text_to_word_sequence\nfrom keras.models import Sequential\nfrom keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import Adam, RMSprop\nimport numpy as np\nimport os\nimport datetime\nimport re\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n 
y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\ndef find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n", "step-5": "from keras.preprocessing.text import text_to_word_sequence\nfrom keras.models import Sequential\nfrom keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import Adam, RMSprop\n#from nltk import FreqDist\nimport numpy as np\nimport os\nimport datetime\nimport re\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):\n '''\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n '''\n \n # Reading raw text from source and destination files\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n\n # Splitting raw text into array of sequences\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'), y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'), y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]\n\n #Check or Create Vocab \n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0: \n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n\n X_vocab = 
sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:(vocab_size)]\n for (i, item) in enumerate(X_vocab):\n if item == \"newlinechar\":\n X_vocab[i] = \"-\"\n for item in X_vocab:\n print>>x_vocab_file, item\n x_vocab_file.close()\n\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:(vocab_size)]\n for (i, item) in enumerate(y_vocab):\n if item == \"newlinechar\":\n y_vocab[i] = \"-\"\n for item in y_vocab:\n print>>y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n \n # Creating the vocabulary set with the most common words\n #dist = FreqDist(np.hstack(X))\n #X_vocab = dist.most_common(vocab_size-1)\n #dist = FreqDist(np.hstack(y))\n #y_vocab = dist.most_common(vocab_size-1)\n\n # Creating an array of words from the vocabulary set, we will use this array as index-to-word dictionary\n X_ix_to_word = X_vocab\n # Adding the word \"ZERO\" to the beginning of the array\n X_ix_to_word.insert(0, 'ZERO')\n # Adding the word 'UNK' to the end of the array (stands for UNKNOWN words)\n X_ix_to_word.append('UNK')\n\n # Creating the word-to-index dictionary from the array created above\n #X_word_to_ix = {word:ix for ix, word in enumerate(X_ix_to_word)}\n \n X_word_to_ix = dict((map(reversed, enumerate(X_ix_to_word))))\n\n # Converting each word to its index value\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n \n #y_word_to_ix = {word:ix for ix, word in enumerate(y_ix_to_word)}\n \n y_word_to_ix = dict((map(reversed, enumerate(y_ix_to_word))))\n \n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n \n return (X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab), y_word_to_ix, y_ix_to_word)\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers):\n model = Sequential()\n\n # Creating encoder network\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n\n # Creating decoder network\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\ndef process_data(word_sentences, max_len, word_to_ix):\n # Vectorizing each element in each sequence\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.\n return sequences\n\ndef 
find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in range(len(s)): ans[s[i]] = sa[i] <|reserved_special_token_0|> for k in ans: S.add(k) <|reserved_special_token_0|> for i in range(1, len(L)): s = L[i] S = '' for j in range(len(s)): if s[j] == '\n': continue S += ans[s[j]] tc += 1 print('Case #', tc, ': ', S, sep='') <|reserved_special_token_1|> s = ( 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv' ) sa = ( 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up' ) ans = {} for i in range(len(s)): ans[s[i]] = sa[i] S = set([]) for k in ans: S.add(k) ans['q'] = 'z' ans['z'] = 'q' f = open('A-small-attempt0.in', 'r') L = f.readlines() tc = 0 for i in range(1, len(L)): s = L[i] S = '' for j in range(len(s)): if s[j] == '\n': continue S += ans[s[j]] tc += 1 print('Case #', tc, ': ', S, sep='') <|reserved_special_token_1|> s = 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv' sa = 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up' ans = {} for i in range(len(s)): ans[s[i]] = sa[i]; S = set([]) for k in ans: S.add(k) #for w in range(26): # if chr(w+97) not in S: # print chr(w+97) # q and z not in input so they must map to each other ans['q'] = 'z' ans['z'] = 'q' f = open('A-small-attempt0.in', 'r') L = f.readlines() tc = 0 for i in range(1, len(L)): s = L[i] S = '' for j in range(len(s)): if s[j] == '\n': continue S += ans[s[j]] tc += 1 print('Case #',tc,': ',S,sep='')
flexible
{ "blob_id": "77b9b111cfb4d0b54e14b2aab81b7b05fd6bbccd", "index": 8552, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(len(s)):\n ans[s[i]] = sa[i]\n<mask token>\nfor k in ans:\n S.add(k)\n<mask token>\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #', tc, ': ', S, sep='')\n", "step-3": "s = (\n 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'\n )\nsa = (\n 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'\n )\nans = {}\nfor i in range(len(s)):\n ans[s[i]] = sa[i]\nS = set([])\nfor k in ans:\n S.add(k)\nans['q'] = 'z'\nans['z'] = 'q'\nf = open('A-small-attempt0.in', 'r')\nL = f.readlines()\ntc = 0\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #', tc, ': ', S, sep='')\n", "step-4": "s = 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'\nsa = 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'\n\nans = {}\nfor i in range(len(s)):\n ans[s[i]] = sa[i];\nS = set([])\nfor k in ans:\n S.add(k)\n#for w in range(26):\n# if chr(w+97) not in S:\n# print chr(w+97)\n\n# q and z not in input so they must map to each other\nans['q'] = 'z'\nans['z'] = 'q'\n\nf = open('A-small-attempt0.in', 'r')\nL = f.readlines()\ntc = 0\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #',tc,': ',S,sep='')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python #-*- coding: utf-8 -*- import pygtk pygtk.require("2.0") import gtk from testarMsg import * class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file("../tg.glade") self.window = builder.get_object("window1") self.text_area = builder.get_object("text_entry") self.window.show() self.opcao = "" builder.connect_signals({"gtk_main_quit": gtk.main_quit, "on_button_analisar_clicked": self.analisar_frase, "on_button_clear_clicked": self.clear_text, "on_button_dilma_clicked": self.opcao_dilma, "on_button_copa_clicked": self.opcao_copa, "on_button_palmeiras_clicked": self.opcao_palmeiras, "on_button_fatec_clicked": self.opcao_fatec, "on_sad_show": self.sad_show, }) def analisar_frase(self, widget): """Função: analisar a frase que o usuário""" frase = self.text_area.get_text() if ( frase != ""): frase_proc= normalizar(frase) self.text_area.set_text(frase) if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'): print("Opcao: %s "%self.opcao) featureList = gera_lista_features(self.opcao) lista_feature_fell = get_lista_feature_fell() features_msg = getFeatureVector(frase_proc) training_set = apply_features(extract_features,lista_feature_fell) fell = avaliar_Sentimento(features_msg,training_set) print ("Sentimento: %s "%fell) def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text("") def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao="dilma" def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao="copa" def opcao_palmeiras(self, widget): """Função: para definir a opcao Palmeiras""" self.opcao="palmeiras" def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao="fatec" def sad_show(self,widget): """Função: para definir se imagem Sad ira aparecer""" self.visible=True if __name__ == "__main__": app = tgApp() gtk.main()
normal
{ "blob_id": "6b6fac3bfb1b1478dd491fc4dd9c45a19aeb7bd8", "index": 6102, "step-1": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n <mask token>\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n <mask token>\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n <mask token>\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n\n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if frase != '':\n frase_proc = normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao 
==\n 'palmeiras' or self.opcao == 'fatec'):\n print('Opcao: %s ' % self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,\n lista_feature_fell)\n fell = avaliar_Sentimento(features_msg, training_set)\n print('Sentimento: %s ' % fell)\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\n<mask token>\n", "step-4": "<mask token>\npygtk.require('2.0')\n<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n\n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if frase != '':\n frase_proc = normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao ==\n 'palmeiras' or self.opcao == 'fatec'):\n print('Opcao: %s ' % self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,\n lista_feature_fell)\n fell = avaliar_Sentimento(features_msg, training_set)\n print('Sentimento: %s ' % fell)\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\nif __name__ == '__main__':\n app = tgApp()\n gtk.main()\n", "step-5": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport pygtk\npygtk.require(\"2.0\")\nimport gtk\nfrom testarMsg import *\n\n\nclass tgApp(object):\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file(\"../tg.glade\")\n self.window = builder.get_object(\"window1\")\n self.text_area = builder.get_object(\"text_entry\")\n self.window.show()\n 
self.opcao = \"\"\n builder.connect_signals({\"gtk_main_quit\": gtk.main_quit,\n \"on_button_analisar_clicked\": self.analisar_frase,\n \"on_button_clear_clicked\": self.clear_text,\n \"on_button_dilma_clicked\": self.opcao_dilma,\n \"on_button_copa_clicked\": self.opcao_copa,\n \"on_button_palmeiras_clicked\": self.opcao_palmeiras,\n \"on_button_fatec_clicked\": self.opcao_fatec,\n \"on_sad_show\": self.sad_show,\n })\n \n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if ( frase != \"\"):\n frase_proc= normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'):\n print(\"Opcao: %s \"%self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,lista_feature_fell)\n fell = avaliar_Sentimento(features_msg,training_set)\n print (\"Sentimento: %s \"%fell)\n \n \n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text(\"\")\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao=\"dilma\"\n \n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao=\"copa\"\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao=\"palmeiras\"\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao=\"fatec\"\n \n def sad_show(self,widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible=True\n\n \nif __name__ == \"__main__\":\n \n app = tgApp()\n gtk.main()\n \n \n", "step-ids": [ 6, 8, 9, 10, 12 ] }
[ 6, 8, 9, 10, 12 ]
from compass import models from compass.models.MetabolicModel import MetabolicModel def test_sbml_3(): model = models.load_metabolic_model("RECON1_xml") assert isinstance(model, MetabolicModel) assert len(model.reactions) == 3742 assert len(model.species) == 2766 def test_sbml_2(): model = models.load_metabolic_model("RECON2.2") assert isinstance(model, MetabolicModel) assert len(model.reactions) == 7785 assert len(model.species) == 6047 def test_mat(): model = models.load_metabolic_model("RECON2_mat") assert isinstance(model, MetabolicModel) assert len(model.reactions) == 7440 assert len(model.species) == 5063 def test_to_json(): model = models.load_metabolic_model("RECON2.2") json = model.to_JSON() assert isinstance(json, str) model = models.load_metabolic_model("RECON1_xml") json = model.to_JSON() assert isinstance(json, str) model = models.load_metabolic_model("RECON2_mat") json = model.to_JSON() assert isinstance(json, str)
normal
{ "blob_id": "863bae04a90143ed942a478c4b71a2269e123bb5", "index": 2980, "step-1": "<mask token>\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n", "step-2": "<mask token>\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\n<mask token>\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n", "step-3": "<mask token>\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model('RECON2.2')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n", "step-4": "from compass import models\nfrom compass.models.MetabolicModel import MetabolicModel\n\n\ndef test_sbml_3():\n model = models.load_metabolic_model('RECON1_xml')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model('RECON2.2')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model('RECON2_mat')\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model('RECON2.2')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON1_xml')\n json = model.to_JSON()\n assert isinstance(json, str)\n model = models.load_metabolic_model('RECON2_mat')\n json = model.to_JSON()\n assert isinstance(json, str)\n", "step-5": "from compass import models\nfrom compass.models.MetabolicModel import MetabolicModel\n\n\ndef test_sbml_3():\n model = 
models.load_metabolic_model(\"RECON1_xml\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 3742\n assert len(model.species) == 2766\n\n\ndef test_sbml_2():\n model = models.load_metabolic_model(\"RECON2.2\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7785\n assert len(model.species) == 6047\n\n\ndef test_mat():\n model = models.load_metabolic_model(\"RECON2_mat\")\n assert isinstance(model, MetabolicModel)\n assert len(model.reactions) == 7440\n assert len(model.species) == 5063\n\n\ndef test_to_json():\n model = models.load_metabolic_model(\"RECON2.2\")\n json = model.to_JSON()\n assert isinstance(json, str)\n\n model = models.load_metabolic_model(\"RECON1_xml\")\n json = model.to_JSON()\n assert isinstance(json, str)\n\n model = models.load_metabolic_model(\"RECON2_mat\")\n json = model.to_JSON()\n assert isinstance(json, str)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def _build_default_components_text(): text = '' for c in DEFAULT_COMPONENTS: text += c + '\n' return text <|reserved_special_token_0|> def autentificate_user(request): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) data = {'error': False} if user is not None: if user.is_active: login(request, user) else: data['error'] = True data['error_message'] = 'Этот аккаутн заблокирован.' else: data['error'] = True data['error_message'] = 'Неправильный логин или пароль.' return JsonResponse(data) <|reserved_special_token_0|> def load_user_data(request): user = request.user data = {'error': False, 'is_autentificated': False} if user.is_authenticated(): diagrams = Diagram.objects.filter(author=user) data['html_text'] = render_to_string('account_data.html', { 'diagrams': diagrams, 'username': user.username}) data['is_autentificated'] = True else: data['error'] = True data['error_message'] = 'Пользователь не выполнил вход в аккаунт.' return JsonResponse(data) <|reserved_special_token_0|> def diagen_main(request): return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def save_diagram_for_user(request): data = {'error': False} if request.user.is_authenticated(): code = request.POST['code'] file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name title = request.POST['title'] diagram = Diagram.objects.filter(author=request.user, title=title) if diagram.count() == 1: diagram = diagram[0] diagram.image_url = url diagram.text = code diagram.save() data['message'] = 'Диаграмма успешно обновлена.' else: new_diagram = Diagram.objects.create(title=title, author= request.user, text=code, image_url=url) if new_diagram != None: data['message'] = 'Диаграмма успешно сохранена.' else: data['error'] = True data['error_message'] = 'Не получилось сохранить диаграмму.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def delete_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: diagram.delete() else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def get_diagram(request): responce = {} try: file_name = build_diagram_from_code(request.POST['code']) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) <|reserved_special_token_0|> def _parse_text_to_lines(text): lines = [] for line in text.split('\n'): if _words_number(line) == 1: lines.append(line) return lines <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def _build_default_components_text(): text = '' for c in DEFAULT_COMPONENTS: text += c + '\n' return text <|reserved_special_token_0|> def autentificate_user(request): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) data = {'error': False} if user is not None: if user.is_active: login(request, user) else: data['error'] = True data['error_message'] = 'Этот аккаутн заблокирован.' 
else: data['error'] = True data['error_message'] = 'Неправильный логин или пароль.' return JsonResponse(data) <|reserved_special_token_0|> def load_user_data(request): user = request.user data = {'error': False, 'is_autentificated': False} if user.is_authenticated(): diagrams = Diagram.objects.filter(author=user) data['html_text'] = render_to_string('account_data.html', { 'diagrams': diagrams, 'username': user.username}) data['is_autentificated'] = True else: data['error'] = True data['error_message'] = 'Пользователь не выполнил вход в аккаунт.' return JsonResponse(data) def load_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: data['code'] = diagram.text data['url'] = diagram.image_url data['title'] = diagram.title else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def diagen_main(request): return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def save_diagram_for_user(request): data = {'error': False} if request.user.is_authenticated(): code = request.POST['code'] file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name title = request.POST['title'] diagram = Diagram.objects.filter(author=request.user, title=title) if diagram.count() == 1: diagram = diagram[0] diagram.image_url = url diagram.text = code diagram.save() data['message'] = 'Диаграмма успешно обновлена.' else: new_diagram = Diagram.objects.create(title=title, author= request.user, text=code, image_url=url) if new_diagram != None: data['message'] = 'Диаграмма успешно сохранена.' else: data['error'] = True data['error_message'] = 'Не получилось сохранить диаграмму.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def delete_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: diagram.delete() else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def get_diagram(request): responce = {} try: file_name = build_diagram_from_code(request.POST['code']) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) <|reserved_special_token_0|> def _parse_text_to_lines(text): lines = [] for line in text.split('\n'): if _words_number(line) == 1: lines.append(line) return lines def _words_number(line): words = re.findall('[\\w]+', line) return len(words) <|reserved_special_token_1|> <|reserved_special_token_0|> def _build_default_components_text(): text = '' for c in DEFAULT_COMPONENTS: text += c + '\n' return text <|reserved_special_token_0|> def autentificate_user(request): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) data = {'error': False} if user is not None: if user.is_active: login(request, user) else: data['error'] = True data['error_message'] = 'Этот аккаутн заблокирован.' 
else: data['error'] = True data['error_message'] = 'Неправильный логин или пароль.' return JsonResponse(data) def logout_user(request): logout(request) return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def registrate_user(request): username = request.POST['username'] password = request.POST['password'] try: new_user = User.objects.create_user(username, password=password) except Exception: return JsonResponse({'error': True, 'error_message': 'Пользователь с таким именем уже существует.'}) else: return JsonResponse({'error': False}) def load_user_data(request): user = request.user data = {'error': False, 'is_autentificated': False} if user.is_authenticated(): diagrams = Diagram.objects.filter(author=user) data['html_text'] = render_to_string('account_data.html', { 'diagrams': diagrams, 'username': user.username}) data['is_autentificated'] = True else: data['error'] = True data['error_message'] = 'Пользователь не выполнил вход в аккаунт.' return JsonResponse(data) def load_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: data['code'] = diagram.text data['url'] = diagram.image_url data['title'] = diagram.title else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def diagen_main(request): return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def save_diagram_for_user(request): data = {'error': False} if request.user.is_authenticated(): code = request.POST['code'] file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name title = request.POST['title'] diagram = Diagram.objects.filter(author=request.user, title=title) if diagram.count() == 1: diagram = diagram[0] diagram.image_url = url diagram.text = code diagram.save() data['message'] = 'Диаграмма успешно обновлена.' else: new_diagram = Diagram.objects.create(title=title, author= request.user, text=code, image_url=url) if new_diagram != None: data['message'] = 'Диаграмма успешно сохранена.' else: data['error'] = True data['error_message'] = 'Не получилось сохранить диаграмму.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def delete_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: diagram.delete() else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' 
return JsonResponse(data) def get_diagram(request): responce = {} try: file_name = build_diagram_from_code(request.POST['code']) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def generate_diagram(request): responce = {} text = request.POST['text'] component_types = _parse_text_to_lines(request.POST['component_types']) component_names = _parse_text_to_lines(request.POST['component_names']) try: code = convert_text_to_code(text, component_types, component_names) file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'code': code, 'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def _parse_text_to_lines(text): lines = [] for line in text.split('\n'): if _words_number(line) == 1: lines.append(line) return lines def _words_number(line): words = re.findall('[\\w]+', line) return len(words) <|reserved_special_token_1|> <|reserved_special_token_0|> def _build_default_components_text(): text = '' for c in DEFAULT_COMPONENTS: text += c + '\n' return text DEFAULT_COMPONENTS_TEXT = _build_default_components_text() SERV_FULL_ADDRESS = 'http://127.0.0.1:8000/' def autentificate_user(request): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) data = {'error': False} if user is not None: if user.is_active: login(request, user) else: data['error'] = True data['error_message'] = 'Этот аккаутн заблокирован.' else: data['error'] = True data['error_message'] = 'Неправильный логин или пароль.' return JsonResponse(data) def logout_user(request): logout(request) return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def registrate_user(request): username = request.POST['username'] password = request.POST['password'] try: new_user = User.objects.create_user(username, password=password) except Exception: return JsonResponse({'error': True, 'error_message': 'Пользователь с таким именем уже существует.'}) else: return JsonResponse({'error': False}) def load_user_data(request): user = request.user data = {'error': False, 'is_autentificated': False} if user.is_authenticated(): diagrams = Diagram.objects.filter(author=user) data['html_text'] = render_to_string('account_data.html', { 'diagrams': diagrams, 'username': user.username}) data['is_autentificated'] = True else: data['error'] = True data['error_message'] = 'Пользователь не выполнил вход в аккаунт.' return JsonResponse(data) def load_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: data['code'] = diagram.text data['url'] = diagram.image_url data['title'] = diagram.title else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' 
return JsonResponse(data) def diagen_main(request): return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def save_diagram_for_user(request): data = {'error': False} if request.user.is_authenticated(): code = request.POST['code'] file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name title = request.POST['title'] diagram = Diagram.objects.filter(author=request.user, title=title) if diagram.count() == 1: diagram = diagram[0] diagram.image_url = url diagram.text = code diagram.save() data['message'] = 'Диаграмма успешно обновлена.' else: new_diagram = Diagram.objects.create(title=title, author= request.user, text=code, image_url=url) if new_diagram != None: data['message'] = 'Диаграмма успешно сохранена.' else: data['error'] = True data['error_message'] = 'Не получилось сохранить диаграмму.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def delete_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: diagram.delete() else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def get_diagram(request): responce = {} try: file_name = build_diagram_from_code(request.POST['code']) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def generate_diagram(request): responce = {} text = request.POST['text'] component_types = _parse_text_to_lines(request.POST['component_types']) component_names = _parse_text_to_lines(request.POST['component_names']) try: code = convert_text_to_code(text, component_types, component_names) file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'code': code, 'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def _parse_text_to_lines(text): lines = [] for line in text.split('\n'): if _words_number(line) == 1: lines.append(line) return lines def _words_number(line): words = re.findall('[\\w]+', line) return len(words) <|reserved_special_token_1|> from django.shortcuts import render, get_object_or_404 from django.template.loader import render_to_string from django.http import JsonResponse from django.contrib.auth.models import User from diagen.utils.DiagramCreator import build_diagram_from_code from diagen.utils.TextConverter import convert_text_to_code from diagen.utils.extraction.ComponentsExtractor import DEFAULT_COMPONENTS from django.contrib.auth import authenticate, login, logout from .models import * import time import re def _build_default_components_text(): text = '' for c in DEFAULT_COMPONENTS: text += c + '\n' return text DEFAULT_COMPONENTS_TEXT = _build_default_components_text() SERV_FULL_ADDRESS = 'http://127.0.0.1:8000/' def autentificate_user(request): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) data = {'error': False} if user is not None: if user.is_active: login(request, user) else: data['error'] = True data['error_message'] = 'Этот аккаутн заблокирован.' 
else: data['error'] = True data['error_message'] = 'Неправильный логин или пароль.' return JsonResponse(data) def logout_user(request): logout(request) return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def registrate_user(request): username = request.POST['username'] password = request.POST['password'] try: new_user = User.objects.create_user(username, password=password) except Exception: return JsonResponse({'error': True, 'error_message': "Пользователь с таким именем уже существует."}) else: return JsonResponse({'error': False}) def load_user_data(request): user = request.user data = {'error': False, 'is_autentificated': False} if user.is_authenticated(): diagrams = Diagram.objects.filter(author=user) data['html_text'] = render_to_string('account_data.html', { "diagrams": diagrams, 'username': user.username }) data['is_autentificated'] = True else: data['error'] = True data['error_message'] = 'Пользователь не выполнил вход в аккаунт.' return JsonResponse(data) def load_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: data['code'] = diagram.text data['url'] = diagram.image_url data['title'] = diagram.title else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def diagen_main(request): return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT}) def save_diagram_for_user(request): data = {'error': False} if request.user.is_authenticated(): code = request.POST['code'] file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name title = request.POST['title'] diagram = Diagram.objects.filter(author=request.user, title=title) if diagram.count() == 1: diagram = diagram[0] diagram.image_url = url diagram.text = code diagram.save() data['message'] = 'Диаграмма успешно обновлена.' else: new_diagram = Diagram.objects.create(title=title, author=request.user, text=code, image_url=url) if new_diagram != None: data['message'] = 'Диаграмма успешно сохранена.' else: data['error'] = True data['error_message'] = 'Не получилось сохранить диаграмму.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' return JsonResponse(data) def delete_user_diagram(request): user = request.user pk = request.POST['pk'] data = {'error': False} if user.is_authenticated(): diagram = get_object_or_404(Diagram, id=pk) if diagram.author.id == user.id: diagram.delete() else: data['error'] = True data['error_message'] = 'Недостаточно прав для данного действия.' else: data['error'] = True data['error_message'] = 'Пользователь не вошел в систему.' 
return JsonResponse(data) def get_diagram(request): responce = {} try: file_name = build_diagram_from_code(request.POST['code']) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def generate_diagram(request): responce = {} text = request.POST['text'] component_types = _parse_text_to_lines(request.POST['component_types']) component_names = _parse_text_to_lines(request.POST['component_names']) try: code = convert_text_to_code(text, component_types, component_names) file_name = build_diagram_from_code(code) url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name responce = {'code': code, 'image_url': url} except Exception as e: responce = {'error': 'true', 'message': str(e)} return JsonResponse(responce) def _parse_text_to_lines(text): lines = [] for line in text.split('\n'): if _words_number(line) == 1: lines.append(line) return lines def _words_number(line): words = re.findall(r"[\w]+", line) return len(words)
flexible
{ "blob_id": "fbbf27f063f6d866e5d0b1210ea9acaebb3bdfb4", "index": 4398, "step-1": "<mask token>\n\n\ndef _build_default_components_text():\n text = ''\n for c in DEFAULT_COMPONENTS:\n text += c + '\\n'\n return text\n\n\n<mask token>\n\n\ndef autentificate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n data = {'error': False}\n if user is not None:\n if user.is_active:\n login(request, user)\n else:\n data['error'] = True\n data['error_message'] = 'Этот аккаутн заблокирован.'\n else:\n data['error'] = True\n data['error_message'] = 'Неправильный логин или пароль.'\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_user_data(request):\n user = request.user\n data = {'error': False, 'is_autentificated': False}\n if user.is_authenticated():\n diagrams = Diagram.objects.filter(author=user)\n data['html_text'] = render_to_string('account_data.html', {\n 'diagrams': diagrams, 'username': user.username})\n data['is_autentificated'] = True\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не выполнил вход в аккаунт.'\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef diagen_main(request):\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef save_diagram_for_user(request):\n data = {'error': False}\n if request.user.is_authenticated():\n code = request.POST['code']\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n title = request.POST['title']\n diagram = Diagram.objects.filter(author=request.user, title=title)\n if diagram.count() == 1:\n diagram = diagram[0]\n diagram.image_url = url\n diagram.text = code\n diagram.save()\n data['message'] = 'Диаграмма успешно обновлена.'\n else:\n new_diagram = Diagram.objects.create(title=title, author=\n request.user, text=code, image_url=url)\n if new_diagram != None:\n data['message'] = 'Диаграмма успешно сохранена.'\n else:\n data['error'] = True\n data['error_message'] = 'Не получилось сохранить диаграмму.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef delete_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n diagram.delete()\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef get_diagram(request):\n responce = {}\n try:\n file_name = build_diagram_from_code(request.POST['code'])\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\n<mask token>\n\n\ndef _parse_text_to_lines(text):\n lines = []\n for line in text.split('\\n'):\n if _words_number(line) == 1:\n lines.append(line)\n return lines\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef _build_default_components_text():\n text = ''\n for c in DEFAULT_COMPONENTS:\n text += c + '\\n'\n return text\n\n\n<mask token>\n\n\ndef autentificate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n data = {'error': False}\n 
if user is not None:\n if user.is_active:\n login(request, user)\n else:\n data['error'] = True\n data['error_message'] = 'Этот аккаутн заблокирован.'\n else:\n data['error'] = True\n data['error_message'] = 'Неправильный логин или пароль.'\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_user_data(request):\n user = request.user\n data = {'error': False, 'is_autentificated': False}\n if user.is_authenticated():\n diagrams = Diagram.objects.filter(author=user)\n data['html_text'] = render_to_string('account_data.html', {\n 'diagrams': diagrams, 'username': user.username})\n data['is_autentificated'] = True\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не выполнил вход в аккаунт.'\n return JsonResponse(data)\n\n\ndef load_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n data['code'] = diagram.text\n data['url'] = diagram.image_url\n data['title'] = diagram.title\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef diagen_main(request):\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef save_diagram_for_user(request):\n data = {'error': False}\n if request.user.is_authenticated():\n code = request.POST['code']\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n title = request.POST['title']\n diagram = Diagram.objects.filter(author=request.user, title=title)\n if diagram.count() == 1:\n diagram = diagram[0]\n diagram.image_url = url\n diagram.text = code\n diagram.save()\n data['message'] = 'Диаграмма успешно обновлена.'\n else:\n new_diagram = Diagram.objects.create(title=title, author=\n request.user, text=code, image_url=url)\n if new_diagram != None:\n data['message'] = 'Диаграмма успешно сохранена.'\n else:\n data['error'] = True\n data['error_message'] = 'Не получилось сохранить диаграмму.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef delete_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n diagram.delete()\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef get_diagram(request):\n responce = {}\n try:\n file_name = build_diagram_from_code(request.POST['code'])\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\n<mask token>\n\n\ndef _parse_text_to_lines(text):\n lines = []\n for line in text.split('\\n'):\n if _words_number(line) == 1:\n lines.append(line)\n return lines\n\n\ndef _words_number(line):\n words = re.findall('[\\\\w]+', line)\n return len(words)\n", "step-3": "<mask token>\n\n\ndef _build_default_components_text():\n text = ''\n for c in DEFAULT_COMPONENTS:\n text += c + '\\n'\n return text\n\n\n<mask token>\n\n\ndef autentificate_user(request):\n username 
= request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n data = {'error': False}\n if user is not None:\n if user.is_active:\n login(request, user)\n else:\n data['error'] = True\n data['error_message'] = 'Этот аккаутн заблокирован.'\n else:\n data['error'] = True\n data['error_message'] = 'Неправильный логин или пароль.'\n return JsonResponse(data)\n\n\ndef logout_user(request):\n logout(request)\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef registrate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n try:\n new_user = User.objects.create_user(username, password=password)\n except Exception:\n return JsonResponse({'error': True, 'error_message':\n 'Пользователь с таким именем уже существует.'})\n else:\n return JsonResponse({'error': False})\n\n\ndef load_user_data(request):\n user = request.user\n data = {'error': False, 'is_autentificated': False}\n if user.is_authenticated():\n diagrams = Diagram.objects.filter(author=user)\n data['html_text'] = render_to_string('account_data.html', {\n 'diagrams': diagrams, 'username': user.username})\n data['is_autentificated'] = True\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не выполнил вход в аккаунт.'\n return JsonResponse(data)\n\n\ndef load_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n data['code'] = diagram.text\n data['url'] = diagram.image_url\n data['title'] = diagram.title\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef diagen_main(request):\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef save_diagram_for_user(request):\n data = {'error': False}\n if request.user.is_authenticated():\n code = request.POST['code']\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n title = request.POST['title']\n diagram = Diagram.objects.filter(author=request.user, title=title)\n if diagram.count() == 1:\n diagram = diagram[0]\n diagram.image_url = url\n diagram.text = code\n diagram.save()\n data['message'] = 'Диаграмма успешно обновлена.'\n else:\n new_diagram = Diagram.objects.create(title=title, author=\n request.user, text=code, image_url=url)\n if new_diagram != None:\n data['message'] = 'Диаграмма успешно сохранена.'\n else:\n data['error'] = True\n data['error_message'] = 'Не получилось сохранить диаграмму.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef delete_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n diagram.delete()\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef get_diagram(request):\n responce = {}\n try:\n file_name = build_diagram_from_code(request.POST['code'])\n url = SERV_FULL_ADDRESS + 
'static/diagrams/' + file_name\n responce = {'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\ndef generate_diagram(request):\n responce = {}\n text = request.POST['text']\n component_types = _parse_text_to_lines(request.POST['component_types'])\n component_names = _parse_text_to_lines(request.POST['component_names'])\n try:\n code = convert_text_to_code(text, component_types, component_names)\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'code': code, 'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\ndef _parse_text_to_lines(text):\n lines = []\n for line in text.split('\\n'):\n if _words_number(line) == 1:\n lines.append(line)\n return lines\n\n\ndef _words_number(line):\n words = re.findall('[\\\\w]+', line)\n return len(words)\n", "step-4": "<mask token>\n\n\ndef _build_default_components_text():\n text = ''\n for c in DEFAULT_COMPONENTS:\n text += c + '\\n'\n return text\n\n\nDEFAULT_COMPONENTS_TEXT = _build_default_components_text()\nSERV_FULL_ADDRESS = 'http://127.0.0.1:8000/'\n\n\ndef autentificate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n data = {'error': False}\n if user is not None:\n if user.is_active:\n login(request, user)\n else:\n data['error'] = True\n data['error_message'] = 'Этот аккаутн заблокирован.'\n else:\n data['error'] = True\n data['error_message'] = 'Неправильный логин или пароль.'\n return JsonResponse(data)\n\n\ndef logout_user(request):\n logout(request)\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef registrate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n try:\n new_user = User.objects.create_user(username, password=password)\n except Exception:\n return JsonResponse({'error': True, 'error_message':\n 'Пользователь с таким именем уже существует.'})\n else:\n return JsonResponse({'error': False})\n\n\ndef load_user_data(request):\n user = request.user\n data = {'error': False, 'is_autentificated': False}\n if user.is_authenticated():\n diagrams = Diagram.objects.filter(author=user)\n data['html_text'] = render_to_string('account_data.html', {\n 'diagrams': diagrams, 'username': user.username})\n data['is_autentificated'] = True\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не выполнил вход в аккаунт.'\n return JsonResponse(data)\n\n\ndef load_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n data['code'] = diagram.text\n data['url'] = diagram.image_url\n data['title'] = diagram.title\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef diagen_main(request):\n return render(request, 'index.html', {'components':\n DEFAULT_COMPONENTS_TEXT})\n\n\ndef save_diagram_for_user(request):\n data = {'error': False}\n if request.user.is_authenticated():\n code = request.POST['code']\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n title = 
request.POST['title']\n diagram = Diagram.objects.filter(author=request.user, title=title)\n if diagram.count() == 1:\n diagram = diagram[0]\n diagram.image_url = url\n diagram.text = code\n diagram.save()\n data['message'] = 'Диаграмма успешно обновлена.'\n else:\n new_diagram = Diagram.objects.create(title=title, author=\n request.user, text=code, image_url=url)\n if new_diagram != None:\n data['message'] = 'Диаграмма успешно сохранена.'\n else:\n data['error'] = True\n data['error_message'] = 'Не получилось сохранить диаграмму.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef delete_user_diagram(request):\n user = request.user\n pk = request.POST['pk']\n data = {'error': False}\n if user.is_authenticated():\n diagram = get_object_or_404(Diagram, id=pk)\n if diagram.author.id == user.id:\n diagram.delete()\n else:\n data['error'] = True\n data['error_message'] = 'Недостаточно прав для данного действия.'\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не вошел в систему.'\n return JsonResponse(data)\n\n\ndef get_diagram(request):\n responce = {}\n try:\n file_name = build_diagram_from_code(request.POST['code'])\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\ndef generate_diagram(request):\n responce = {}\n text = request.POST['text']\n component_types = _parse_text_to_lines(request.POST['component_types'])\n component_names = _parse_text_to_lines(request.POST['component_names'])\n try:\n code = convert_text_to_code(text, component_types, component_names)\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'code': code, 'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n return JsonResponse(responce)\n\n\ndef _parse_text_to_lines(text):\n lines = []\n for line in text.split('\\n'):\n if _words_number(line) == 1:\n lines.append(line)\n return lines\n\n\ndef _words_number(line):\n words = re.findall('[\\\\w]+', line)\n return len(words)\n", "step-5": "from django.shortcuts import render, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom diagen.utils.DiagramCreator import build_diagram_from_code\nfrom diagen.utils.TextConverter import convert_text_to_code\nfrom diagen.utils.extraction.ComponentsExtractor import DEFAULT_COMPONENTS\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import *\nimport time\nimport re\n\n\ndef _build_default_components_text():\n text = ''\n for c in DEFAULT_COMPONENTS:\n text += c + '\\n'\n return text\n\nDEFAULT_COMPONENTS_TEXT = _build_default_components_text()\nSERV_FULL_ADDRESS = 'http://127.0.0.1:8000/'\n\n\ndef autentificate_user(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n\n data = {'error': False}\n if user is not None:\n if user.is_active:\n login(request, user)\n else:\n data['error'] = True\n data['error_message'] = 'Этот аккаутн заблокирован.'\n else:\n data['error'] = True\n data['error_message'] = 'Неправильный логин или пароль.'\n\n return JsonResponse(data)\n\n\ndef logout_user(request):\n\tlogout(request)\n\treturn render(request, 'index.html', 
{'components': DEFAULT_COMPONENTS_TEXT})\n\n\ndef registrate_user(request):\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\ttry:\n\t\tnew_user = User.objects.create_user(username, password=password)\n\texcept Exception:\n\t\treturn JsonResponse({'error': True, 'error_message': \"Пользователь с таким именем уже существует.\"})\n\telse:\n\t\treturn JsonResponse({'error': False})\n\n\ndef load_user_data(request):\n user = request.user\n data = {'error': False, 'is_autentificated': False}\n if user.is_authenticated():\n diagrams = Diagram.objects.filter(author=user)\n data['html_text'] = render_to_string('account_data.html', {\n \"diagrams\": diagrams, 'username': user.username\n })\n data['is_autentificated'] = True\n else:\n data['error'] = True\n data['error_message'] = 'Пользователь не выполнил вход в аккаунт.'\n return JsonResponse(data)\n\ndef load_user_diagram(request):\n\tuser = request.user\n\tpk = request.POST['pk']\n\n\tdata = {'error': False}\n\tif user.is_authenticated():\n\t\tdiagram = get_object_or_404(Diagram, id=pk)\n\t\tif diagram.author.id == user.id:\n\t\t\tdata['code'] = diagram.text\n\t\t\tdata['url'] = diagram.image_url\n\t\t\tdata['title'] = diagram.title\n\t\telse:\n\t\t\tdata['error'] = True\n\t\t\tdata['error_message'] = 'Недостаточно прав для данного действия.'\n\telse:\n\t\tdata['error'] = True\n\t\tdata['error_message'] = 'Пользователь не вошел в систему.'\n\treturn JsonResponse(data)\n\ndef diagen_main(request):\n return render(request, 'index.html', {'components': DEFAULT_COMPONENTS_TEXT})\n\n\ndef save_diagram_for_user(request):\n\tdata = {'error': False}\n\tif request.user.is_authenticated():\n\t\tcode = request.POST['code']\n\t\tfile_name = build_diagram_from_code(code)\n\t\turl = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n\t\ttitle = request.POST['title']\n\n\t\tdiagram = Diagram.objects.filter(author=request.user, title=title)\n\t\tif diagram.count() == 1:\n\t\t\tdiagram = diagram[0]\n\t\t\tdiagram.image_url = url\n\t\t\tdiagram.text = code\n\t\t\tdiagram.save()\n\t\t\tdata['message'] = 'Диаграмма успешно обновлена.'\n\t\telse:\n\t\t\tnew_diagram = Diagram.objects.create(title=title, author=request.user, text=code, image_url=url)\n\t\t\tif new_diagram != None:\n\t\t\t\tdata['message'] = 'Диаграмма успешно сохранена.'\n\t\t\telse:\n\t\t\t\tdata['error'] = True\n\t\t\t\tdata['error_message'] = 'Не получилось сохранить диаграмму.'\n\telse:\n\t\tdata['error'] = True\n\t\tdata['error_message'] = 'Пользователь не вошел в систему.'\n\n\treturn JsonResponse(data)\n\n\ndef delete_user_diagram(request):\n\tuser = request.user\n\tpk = request.POST['pk']\n\n\tdata = {'error': False}\n\tif user.is_authenticated():\n\t\tdiagram = get_object_or_404(Diagram, id=pk)\n\t\tif diagram.author.id == user.id:\n\t\t\tdiagram.delete()\n\t\telse:\n\t\t\tdata['error'] = True\n\t\t\tdata['error_message'] = 'Недостаточно прав для данного действия.'\n\telse:\n\t\tdata['error'] = True\n\t\tdata['error_message'] = 'Пользователь не вошел в систему.'\n\treturn JsonResponse(data)\n\n\ndef get_diagram(request):\n responce = {}\n\n try:\n file_name = build_diagram_from_code(request.POST['code'])\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n\n return JsonResponse(responce)\n\n\ndef generate_diagram(request):\n responce = {}\n text = request.POST['text']\n component_types = _parse_text_to_lines(request.POST['component_types'])\n 
component_names = _parse_text_to_lines(request.POST['component_names'])\n\n try:\n code = convert_text_to_code(text, component_types, component_names)\n file_name = build_diagram_from_code(code)\n url = SERV_FULL_ADDRESS + 'static/diagrams/' + file_name\n responce = {'code': code, 'image_url': url}\n except Exception as e:\n responce = {'error': 'true', 'message': str(e)}\n\n return JsonResponse(responce)\n\n\ndef _parse_text_to_lines(text):\n lines = []\n for line in text.split('\\n'):\n if _words_number(line) == 1:\n lines.append(line)\n return lines\n\n\ndef _words_number(line):\n words = re.findall(r\"[\\w]+\", line)\n return len(words)\n", "step-ids": [ 8, 10, 13, 14, 16 ] }
[ 8, 10, 13, 14, 16 ]
<|reserved_special_token_0|> @MultiSerializer.register(lambda x: True) class PickleSerializer(BaseSerializer): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, Exception)) class ExceptionSerializer(BaseSerializer): """ Exception serialization. """ signature = '_e' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int, float))) class BasicSerializer(BaseSerializer): """ Basic serialization of simple python types. """ signature = '_b' def serialize(self, data): return data def deserialize(self, data): return data class Encoder(object): """ Handles how args and kwargs are encoded over zmq ports. By default zerorpc does not support passing kwargs to remote methods. This class is used to fix that so args are kwargs are combined into a single args payload that is then deconstructed on the remote side. """ _default_serializer = PickleSerializer def __init__(self, serializer=None): if serializer is None: serializer = self._default_serializer() self.serializer = serializer def encode(self, *args, **kwargs): """ Encode args and kwargs as a single serialized payload. Parameters ---------- args : *Any kwargs : **Any Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ return self.serializer.serialize(args), self.serializer.serialize( kwargs) def decode(self, *payload): """ Decode encoded args and kwargs. Parameters ---------- payload : Tuple[Tuple[Any, ...], Dict[Any, Any]] Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ if not payload: return (), {} args, kwargs = payload return self.serializer.deserialize(args), self.serializer.deserialize( kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> class MultiSerializer(BaseSerializer): <|reserved_special_token_0|> <|reserved_special_token_0|> @classmethod def register(cls, claim_func): """ Decorator for registering a callable to serialize certain types. Parameters ---------- claim_func : Callable[Any, bool] Returns ------- Callable[[T], T] """ def _deco(serializer): cls._registered.insert(0, (claim_func, serializer)) return serializer return _deco def __init__(self): self._serializers = {} self._claims = [] for claim_func, serializerCls in self._registered: assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.' 
assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format( serializerCls.signature) serializer = serializerCls() self._claims.append((serializerCls.signature, claim_func)) self._serializers[serializerCls.signature] = serializer def serialize(self, data): if isinstance(data, (list, tuple, set)): return type(data)(self.serialize(x) for x in data) elif isinstance(data, MutableMapping): return type(data)({self.serialize(k): self.serialize(v) for k, v in data.items()}) for name, claim_func in self._claims: if claim_func(data): return name, self._serializers[name].serialize(data) raise ValueError('No serializer found for {!r}'.format(data)) def deserialize(self, payload): if not payload: return payload if isinstance(payload, (tuple, list)) and len(payload ) == 2 and payload[0] in self._serializers.keys(): signature, data = payload if signature not in self._serializers: raise ValueError('No deserializer found for {!r}'.format(data)) return self._serializers[signature].deserialize(data) if isinstance(payload, (list, tuple, set)): return type(payload)(self.deserialize(x) for x in payload) elif isinstance(payload, MutableMapping): return type(payload)({self.deserialize(k): self.deserialize(v) for k, v in payload.items()}) else: raise NotImplementedError @MultiSerializer.register(lambda x: True) class PickleSerializer(BaseSerializer): """ Pickle serialization of python objects over the zmq ports. """ signature = '_p' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, Exception)) class ExceptionSerializer(BaseSerializer): """ Exception serialization. """ signature = '_e' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int, float))) class BasicSerializer(BaseSerializer): """ Basic serialization of simple python types. """ signature = '_b' def serialize(self, data): return data def deserialize(self, data): return data class Encoder(object): """ Handles how args and kwargs are encoded over zmq ports. By default zerorpc does not support passing kwargs to remote methods. This class is used to fix that so args are kwargs are combined into a single args payload that is then deconstructed on the remote side. """ _default_serializer = PickleSerializer def __init__(self, serializer=None): if serializer is None: serializer = self._default_serializer() self.serializer = serializer def encode(self, *args, **kwargs): """ Encode args and kwargs as a single serialized payload. Parameters ---------- args : *Any kwargs : **Any Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ return self.serializer.serialize(args), self.serializer.serialize( kwargs) def decode(self, *payload): """ Decode encoded args and kwargs. Parameters ---------- payload : Tuple[Tuple[Any, ...], Dict[Any, Any]] Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ if not payload: return (), {} args, kwargs = payload return self.serializer.deserialize(args), self.serializer.deserialize( kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseSerializer(Generic[T]): <|reserved_special_token_0|> signature = None @abc.abstractmethod def serialize(self, data): """ Serialize a python object to transport over zmq. 
Parameters ---------- data : T Returns ------- Any """ raise NotImplementedError @abc.abstractmethod def deserialize(self, data): """ Deserialize a python object. Counter of `serialize`. Parameters ---------- data : Any Returns ------- T """ return NotImplementedError class MultiSerializer(BaseSerializer): """ Serializer with multple sub-serializers that can register methods to claim certain python objects. All serialized objects (besides list, tuples, sets, dicts) are represented as a tuple of (serializer.signature, serialized_value). This is so data can be properly decoded on the remote side. Register new sub-serializers using the register decorator: @MultiSerializer.register(lamba x: isinstance(x, MyCls)) class MyClsSerializer(BaseSerializer): ... """ _registered = [] @classmethod def register(cls, claim_func): """ Decorator for registering a callable to serialize certain types. Parameters ---------- claim_func : Callable[Any, bool] Returns ------- Callable[[T], T] """ def _deco(serializer): cls._registered.insert(0, (claim_func, serializer)) return serializer return _deco def __init__(self): self._serializers = {} self._claims = [] for claim_func, serializerCls in self._registered: assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.' assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format( serializerCls.signature) serializer = serializerCls() self._claims.append((serializerCls.signature, claim_func)) self._serializers[serializerCls.signature] = serializer def serialize(self, data): if isinstance(data, (list, tuple, set)): return type(data)(self.serialize(x) for x in data) elif isinstance(data, MutableMapping): return type(data)({self.serialize(k): self.serialize(v) for k, v in data.items()}) for name, claim_func in self._claims: if claim_func(data): return name, self._serializers[name].serialize(data) raise ValueError('No serializer found for {!r}'.format(data)) def deserialize(self, payload): if not payload: return payload if isinstance(payload, (tuple, list)) and len(payload ) == 2 and payload[0] in self._serializers.keys(): signature, data = payload if signature not in self._serializers: raise ValueError('No deserializer found for {!r}'.format(data)) return self._serializers[signature].deserialize(data) if isinstance(payload, (list, tuple, set)): return type(payload)(self.deserialize(x) for x in payload) elif isinstance(payload, MutableMapping): return type(payload)({self.deserialize(k): self.deserialize(v) for k, v in payload.items()}) else: raise NotImplementedError @MultiSerializer.register(lambda x: True) class PickleSerializer(BaseSerializer): """ Pickle serialization of python objects over the zmq ports. """ signature = '_p' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, Exception)) class ExceptionSerializer(BaseSerializer): """ Exception serialization. """ signature = '_e' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int, float))) class BasicSerializer(BaseSerializer): """ Basic serialization of simple python types. """ signature = '_b' def serialize(self, data): return data def deserialize(self, data): return data class Encoder(object): """ Handles how args and kwargs are encoded over zmq ports. 
By default zerorpc does not support passing kwargs to remote methods. This class is used to fix that so args are kwargs are combined into a single args payload that is then deconstructed on the remote side. """ _default_serializer = PickleSerializer def __init__(self, serializer=None): if serializer is None: serializer = self._default_serializer() self.serializer = serializer def encode(self, *args, **kwargs): """ Encode args and kwargs as a single serialized payload. Parameters ---------- args : *Any kwargs : **Any Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ return self.serializer.serialize(args), self.serializer.serialize( kwargs) def decode(self, *payload): """ Decode encoded args and kwargs. Parameters ---------- payload : Tuple[Tuple[Any, ...], Dict[Any, Any]] Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ if not payload: return (), {} args, kwargs = payload return self.serializer.deserialize(args), self.serializer.deserialize( kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseSerializer(Generic[T]): """ The serializer is responsible for converting complex python data types into primitive types that can be sent over zmq ports via msgpack. """ signature = None @abc.abstractmethod def serialize(self, data): """ Serialize a python object to transport over zmq. Parameters ---------- data : T Returns ------- Any """ raise NotImplementedError @abc.abstractmethod def deserialize(self, data): """ Deserialize a python object. Counter of `serialize`. Parameters ---------- data : Any Returns ------- T """ return NotImplementedError class MultiSerializer(BaseSerializer): """ Serializer with multple sub-serializers that can register methods to claim certain python objects. All serialized objects (besides list, tuples, sets, dicts) are represented as a tuple of (serializer.signature, serialized_value). This is so data can be properly decoded on the remote side. Register new sub-serializers using the register decorator: @MultiSerializer.register(lamba x: isinstance(x, MyCls)) class MyClsSerializer(BaseSerializer): ... """ _registered = [] @classmethod def register(cls, claim_func): """ Decorator for registering a callable to serialize certain types. Parameters ---------- claim_func : Callable[Any, bool] Returns ------- Callable[[T], T] """ def _deco(serializer): cls._registered.insert(0, (claim_func, serializer)) return serializer return _deco def __init__(self): self._serializers = {} self._claims = [] for claim_func, serializerCls in self._registered: assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.' 
assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format( serializerCls.signature) serializer = serializerCls() self._claims.append((serializerCls.signature, claim_func)) self._serializers[serializerCls.signature] = serializer def serialize(self, data): if isinstance(data, (list, tuple, set)): return type(data)(self.serialize(x) for x in data) elif isinstance(data, MutableMapping): return type(data)({self.serialize(k): self.serialize(v) for k, v in data.items()}) for name, claim_func in self._claims: if claim_func(data): return name, self._serializers[name].serialize(data) raise ValueError('No serializer found for {!r}'.format(data)) def deserialize(self, payload): if not payload: return payload if isinstance(payload, (tuple, list)) and len(payload ) == 2 and payload[0] in self._serializers.keys(): signature, data = payload if signature not in self._serializers: raise ValueError('No deserializer found for {!r}'.format(data)) return self._serializers[signature].deserialize(data) if isinstance(payload, (list, tuple, set)): return type(payload)(self.deserialize(x) for x in payload) elif isinstance(payload, MutableMapping): return type(payload)({self.deserialize(k): self.deserialize(v) for k, v in payload.items()}) else: raise NotImplementedError @MultiSerializer.register(lambda x: True) class PickleSerializer(BaseSerializer): """ Pickle serialization of python objects over the zmq ports. """ signature = '_p' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, Exception)) class ExceptionSerializer(BaseSerializer): """ Exception serialization. """ signature = '_e' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, (str, unicode, bytes, int, float))) class BasicSerializer(BaseSerializer): """ Basic serialization of simple python types. """ signature = '_b' def serialize(self, data): return data def deserialize(self, data): return data class Encoder(object): """ Handles how args and kwargs are encoded over zmq ports. By default zerorpc does not support passing kwargs to remote methods. This class is used to fix that so args are kwargs are combined into a single args payload that is then deconstructed on the remote side. """ _default_serializer = PickleSerializer def __init__(self, serializer=None): if serializer is None: serializer = self._default_serializer() self.serializer = serializer def encode(self, *args, **kwargs): """ Encode args and kwargs as a single serialized payload. Parameters ---------- args : *Any kwargs : **Any Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ return self.serializer.serialize(args), self.serializer.serialize( kwargs) def decode(self, *payload): """ Decode encoded args and kwargs. Parameters ---------- payload : Tuple[Tuple[Any, ...], Dict[Any, Any]] Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ if not payload: return (), {} args, kwargs = payload return self.serializer.deserialize(args), self.serializer.deserialize( kwargs) <|reserved_special_token_1|> import abc try: import cPickle as pickle except ImportError: import pickle from typing import * T = TypeVar('T') class BaseSerializer(Generic[T]): """ The serializer is responsible for converting complex python data types into primitive types that can be sent over zmq ports via msgpack. 
""" # Used within the `MultiSerializer` to embed which serializer to use for # round-trip data serialization. signature = None # type: str @abc.abstractmethod def serialize(self, data): """ Serialize a python object to transport over zmq. Parameters ---------- data : T Returns ------- Any """ raise NotImplementedError @abc.abstractmethod def deserialize(self, data): """ Deserialize a python object. Counter of `serialize`. Parameters ---------- data : Any Returns ------- T """ return NotImplementedError class MultiSerializer(BaseSerializer): """ Serializer with multple sub-serializers that can register methods to claim certain python objects. All serialized objects (besides list, tuples, sets, dicts) are represented as a tuple of (serializer.signature, serialized_value). This is so data can be properly decoded on the remote side. Register new sub-serializers using the register decorator: @MultiSerializer.register(lamba x: isinstance(x, MyCls)) class MyClsSerializer(BaseSerializer): ... """ _registered = [] @classmethod def register(cls, claim_func): """ Decorator for registering a callable to serialize certain types. Parameters ---------- claim_func : Callable[Any, bool] Returns ------- Callable[[T], T] """ def _deco(serializer): cls._registered.insert(0, (claim_func, serializer)) return serializer return _deco def __init__(self): self._serializers = {} # type: Dict[str, BaseSerializer] self._claims = [] # type: List[Tuple[str, Callable[[Any], bool]]] for claim_func, serializerCls in self._registered: assert serializerCls.signature is not None, \ 'Populate the serializer.signature attribute.' assert serializerCls.signature not in self._serializers, \ 'Existing serializer with signature ' \ '{!r}'.format(serializerCls.signature) serializer = serializerCls() self._claims.append((serializerCls.signature, claim_func)) self._serializers[serializerCls.signature] = serializer def serialize(self, data): if isinstance(data, (list, tuple, set)): return type(data)(self.serialize(x) for x in data) elif isinstance(data, MutableMapping): return type(data)({self.serialize(k): self.serialize(v) for k, v in data.items()}) for name, claim_func in self._claims: if claim_func(data): return name, self._serializers[name].serialize(data) raise ValueError('No serializer found for {!r}'.format(data)) def deserialize(self, payload): if not payload: return payload if isinstance(payload, (tuple, list)) \ and len(payload) == 2 \ and payload[0] in self._serializers.keys(): signature, data = payload if signature not in self._serializers: raise ValueError('No deserializer found for {!r}'.format(data)) return self._serializers[signature].deserialize(data) if isinstance(payload, (list, tuple, set)): return type(payload)(self.deserialize(x) for x in payload) elif isinstance(payload, MutableMapping): return type(payload)({self.deserialize(k): self.deserialize(v) for k, v in payload.items()}) else: raise NotImplementedError @MultiSerializer.register(lambda x: True) class PickleSerializer(BaseSerializer): """ Pickle serialization of python objects over the zmq ports. """ signature = '_p' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register(lambda x: isinstance(x, Exception)) class ExceptionSerializer(BaseSerializer): """ Exception serialization. 
""" signature = '_e' def serialize(self, data): return pickle.dumps(data, -1) def deserialize(self, data): return pickle.loads(data) @MultiSerializer.register( lambda x: isinstance(x, (str, unicode, bytes, int, float))) class BasicSerializer(BaseSerializer): """ Basic serialization of simple python types. """ signature = '_b' def serialize(self, data): return data def deserialize(self, data): return data class Encoder(object): """ Handles how args and kwargs are encoded over zmq ports. By default zerorpc does not support passing kwargs to remote methods. This class is used to fix that so args are kwargs are combined into a single args payload that is then deconstructed on the remote side. """ _default_serializer = PickleSerializer def __init__(self, serializer=None): if serializer is None: serializer = self._default_serializer() self.serializer = serializer def encode(self, *args, **kwargs): """ Encode args and kwargs as a single serialized payload. Parameters ---------- args : *Any kwargs : **Any Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ return self.serializer.serialize(args), \ self.serializer.serialize(kwargs) def decode(self, *payload): """ Decode encoded args and kwargs. Parameters ---------- payload : Tuple[Tuple[Any, ...], Dict[Any, Any]] Returns ------- Tuple[Tuple[Any, ...], Dict[Any, Any]] """ if not payload: return (), {} args, kwargs = payload return self.serializer.deserialize(args), \ self.serializer.deserialize(kwargs)
flexible
{ "blob_id": "94f5fa411f8a41985caaf4eb7ab1cb4e45439405", "index": 1524, "step-1": "<mask token>\n\n\[email protected](lambda x: True)\nclass PickleSerializer(BaseSerializer):\n <mask token>\n <mask token>\n <mask token>\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, Exception))\nclass ExceptionSerializer(BaseSerializer):\n \"\"\"\n Exception serialization.\n \"\"\"\n signature = '_e'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, (str, unicode, bytes, int,\n float)))\nclass BasicSerializer(BaseSerializer):\n \"\"\"\n Basic serialization of simple python types.\n \"\"\"\n signature = '_b'\n\n def serialize(self, data):\n return data\n\n def deserialize(self, data):\n return data\n\n\nclass Encoder(object):\n \"\"\"\n Handles how args and kwargs are encoded over zmq ports.\n\n By default zerorpc does not support passing kwargs to remote methods.\n This class is used to fix that so args are kwargs are combined into a\n single args payload that is then deconstructed on the remote side.\n \"\"\"\n _default_serializer = PickleSerializer\n\n def __init__(self, serializer=None):\n if serializer is None:\n serializer = self._default_serializer()\n self.serializer = serializer\n\n def encode(self, *args, **kwargs):\n \"\"\"\n Encode args and kwargs as a single serialized payload.\n\n Parameters\n ----------\n args : *Any\n kwargs : **Any\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n return self.serializer.serialize(args), self.serializer.serialize(\n kwargs)\n\n def decode(self, *payload):\n \"\"\"\n Decode encoded args and kwargs.\n\n Parameters\n ----------\n payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), self.serializer.deserialize(\n kwargs)\n", "step-2": "<mask token>\n\n\nclass MultiSerializer(BaseSerializer):\n <mask token>\n <mask token>\n\n @classmethod\n def register(cls, claim_func):\n \"\"\"\n Decorator for registering a callable to serialize certain types.\n\n Parameters\n ----------\n claim_func : Callable[Any, bool]\n\n Returns\n -------\n Callable[[T], T]\n \"\"\"\n\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco\n\n def __init__(self):\n self._serializers = {}\n self._claims = []\n for claim_func, serializerCls in self._registered:\n assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'\n assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(\n serializerCls.signature)\n serializer = serializerCls()\n self._claims.append((serializerCls.signature, claim_func))\n self._serializers[serializerCls.signature] = serializer\n\n def serialize(self, data):\n if isinstance(data, (list, tuple, set)):\n return type(data)(self.serialize(x) for x in data)\n elif isinstance(data, MutableMapping):\n return type(data)({self.serialize(k): self.serialize(v) for k,\n v in data.items()})\n for name, claim_func in self._claims:\n if claim_func(data):\n return name, self._serializers[name].serialize(data)\n raise ValueError('No serializer found for {!r}'.format(data))\n\n def deserialize(self, payload):\n if not payload:\n return payload\n if isinstance(payload, (tuple, 
list)) and len(payload\n ) == 2 and payload[0] in self._serializers.keys():\n signature, data = payload\n if signature not in self._serializers:\n raise ValueError('No deserializer found for {!r}'.format(data))\n return self._serializers[signature].deserialize(data)\n if isinstance(payload, (list, tuple, set)):\n return type(payload)(self.deserialize(x) for x in payload)\n elif isinstance(payload, MutableMapping):\n return type(payload)({self.deserialize(k): self.deserialize(v) for\n k, v in payload.items()})\n else:\n raise NotImplementedError\n\n\[email protected](lambda x: True)\nclass PickleSerializer(BaseSerializer):\n \"\"\"\n Pickle serialization of python objects over the zmq ports.\n \"\"\"\n signature = '_p'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, Exception))\nclass ExceptionSerializer(BaseSerializer):\n \"\"\"\n Exception serialization.\n \"\"\"\n signature = '_e'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, (str, unicode, bytes, int,\n float)))\nclass BasicSerializer(BaseSerializer):\n \"\"\"\n Basic serialization of simple python types.\n \"\"\"\n signature = '_b'\n\n def serialize(self, data):\n return data\n\n def deserialize(self, data):\n return data\n\n\nclass Encoder(object):\n \"\"\"\n Handles how args and kwargs are encoded over zmq ports.\n\n By default zerorpc does not support passing kwargs to remote methods.\n This class is used to fix that so args are kwargs are combined into a\n single args payload that is then deconstructed on the remote side.\n \"\"\"\n _default_serializer = PickleSerializer\n\n def __init__(self, serializer=None):\n if serializer is None:\n serializer = self._default_serializer()\n self.serializer = serializer\n\n def encode(self, *args, **kwargs):\n \"\"\"\n Encode args and kwargs as a single serialized payload.\n\n Parameters\n ----------\n args : *Any\n kwargs : **Any\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n return self.serializer.serialize(args), self.serializer.serialize(\n kwargs)\n\n def decode(self, *payload):\n \"\"\"\n Decode encoded args and kwargs.\n\n Parameters\n ----------\n payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), self.serializer.deserialize(\n kwargs)\n", "step-3": "<mask token>\n\n\nclass BaseSerializer(Generic[T]):\n <mask token>\n signature = None\n\n @abc.abstractmethod\n def serialize(self, data):\n \"\"\"\n Serialize a python object to transport over zmq.\n\n Parameters\n ----------\n data : T\n\n Returns\n -------\n Any\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def deserialize(self, data):\n \"\"\"\n Deserialize a python object. Counter of `serialize`.\n\n Parameters\n ----------\n data : Any\n\n Returns\n -------\n T\n \"\"\"\n return NotImplementedError\n\n\nclass MultiSerializer(BaseSerializer):\n \"\"\"\n Serializer with multple sub-serializers that can register methods to claim\n certain python objects.\n\n All serialized objects (besides list, tuples, sets, dicts) are represented\n as a tuple of (serializer.signature, serialized_value). 
This is so data\n can be properly decoded on the remote side.\n\n Register new sub-serializers using the register decorator:\n\n @MultiSerializer.register(lamba x: isinstance(x, MyCls))\n class MyClsSerializer(BaseSerializer):\n ...\n \"\"\"\n _registered = []\n\n @classmethod\n def register(cls, claim_func):\n \"\"\"\n Decorator for registering a callable to serialize certain types.\n\n Parameters\n ----------\n claim_func : Callable[Any, bool]\n\n Returns\n -------\n Callable[[T], T]\n \"\"\"\n\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco\n\n def __init__(self):\n self._serializers = {}\n self._claims = []\n for claim_func, serializerCls in self._registered:\n assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'\n assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(\n serializerCls.signature)\n serializer = serializerCls()\n self._claims.append((serializerCls.signature, claim_func))\n self._serializers[serializerCls.signature] = serializer\n\n def serialize(self, data):\n if isinstance(data, (list, tuple, set)):\n return type(data)(self.serialize(x) for x in data)\n elif isinstance(data, MutableMapping):\n return type(data)({self.serialize(k): self.serialize(v) for k,\n v in data.items()})\n for name, claim_func in self._claims:\n if claim_func(data):\n return name, self._serializers[name].serialize(data)\n raise ValueError('No serializer found for {!r}'.format(data))\n\n def deserialize(self, payload):\n if not payload:\n return payload\n if isinstance(payload, (tuple, list)) and len(payload\n ) == 2 and payload[0] in self._serializers.keys():\n signature, data = payload\n if signature not in self._serializers:\n raise ValueError('No deserializer found for {!r}'.format(data))\n return self._serializers[signature].deserialize(data)\n if isinstance(payload, (list, tuple, set)):\n return type(payload)(self.deserialize(x) for x in payload)\n elif isinstance(payload, MutableMapping):\n return type(payload)({self.deserialize(k): self.deserialize(v) for\n k, v in payload.items()})\n else:\n raise NotImplementedError\n\n\[email protected](lambda x: True)\nclass PickleSerializer(BaseSerializer):\n \"\"\"\n Pickle serialization of python objects over the zmq ports.\n \"\"\"\n signature = '_p'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, Exception))\nclass ExceptionSerializer(BaseSerializer):\n \"\"\"\n Exception serialization.\n \"\"\"\n signature = '_e'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, (str, unicode, bytes, int,\n float)))\nclass BasicSerializer(BaseSerializer):\n \"\"\"\n Basic serialization of simple python types.\n \"\"\"\n signature = '_b'\n\n def serialize(self, data):\n return data\n\n def deserialize(self, data):\n return data\n\n\nclass Encoder(object):\n \"\"\"\n Handles how args and kwargs are encoded over zmq ports.\n\n By default zerorpc does not support passing kwargs to remote methods.\n This class is used to fix that so args are kwargs are combined into a\n single args payload that is then deconstructed on the remote side.\n \"\"\"\n _default_serializer = PickleSerializer\n\n def __init__(self, serializer=None):\n if serializer is None:\n serializer = 
self._default_serializer()\n self.serializer = serializer\n\n def encode(self, *args, **kwargs):\n \"\"\"\n Encode args and kwargs as a single serialized payload.\n\n Parameters\n ----------\n args : *Any\n kwargs : **Any\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n return self.serializer.serialize(args), self.serializer.serialize(\n kwargs)\n\n def decode(self, *payload):\n \"\"\"\n Decode encoded args and kwargs.\n\n Parameters\n ----------\n payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), self.serializer.deserialize(\n kwargs)\n", "step-4": "<mask token>\n\n\nclass BaseSerializer(Generic[T]):\n \"\"\"\n The serializer is responsible for converting complex python data types\n into primitive types that can be sent over zmq ports via msgpack.\n \"\"\"\n signature = None\n\n @abc.abstractmethod\n def serialize(self, data):\n \"\"\"\n Serialize a python object to transport over zmq.\n\n Parameters\n ----------\n data : T\n\n Returns\n -------\n Any\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def deserialize(self, data):\n \"\"\"\n Deserialize a python object. Counter of `serialize`.\n\n Parameters\n ----------\n data : Any\n\n Returns\n -------\n T\n \"\"\"\n return NotImplementedError\n\n\nclass MultiSerializer(BaseSerializer):\n \"\"\"\n Serializer with multple sub-serializers that can register methods to claim\n certain python objects.\n\n All serialized objects (besides list, tuples, sets, dicts) are represented\n as a tuple of (serializer.signature, serialized_value). This is so data\n can be properly decoded on the remote side.\n\n Register new sub-serializers using the register decorator:\n\n @MultiSerializer.register(lamba x: isinstance(x, MyCls))\n class MyClsSerializer(BaseSerializer):\n ...\n \"\"\"\n _registered = []\n\n @classmethod\n def register(cls, claim_func):\n \"\"\"\n Decorator for registering a callable to serialize certain types.\n\n Parameters\n ----------\n claim_func : Callable[Any, bool]\n\n Returns\n -------\n Callable[[T], T]\n \"\"\"\n\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco\n\n def __init__(self):\n self._serializers = {}\n self._claims = []\n for claim_func, serializerCls in self._registered:\n assert serializerCls.signature is not None, 'Populate the serializer.signature attribute.'\n assert serializerCls.signature not in self._serializers, 'Existing serializer with signature {!r}'.format(\n serializerCls.signature)\n serializer = serializerCls()\n self._claims.append((serializerCls.signature, claim_func))\n self._serializers[serializerCls.signature] = serializer\n\n def serialize(self, data):\n if isinstance(data, (list, tuple, set)):\n return type(data)(self.serialize(x) for x in data)\n elif isinstance(data, MutableMapping):\n return type(data)({self.serialize(k): self.serialize(v) for k,\n v in data.items()})\n for name, claim_func in self._claims:\n if claim_func(data):\n return name, self._serializers[name].serialize(data)\n raise ValueError('No serializer found for {!r}'.format(data))\n\n def deserialize(self, payload):\n if not payload:\n return payload\n if isinstance(payload, (tuple, list)) and len(payload\n ) == 2 and payload[0] in self._serializers.keys():\n signature, data = payload\n if signature not in self._serializers:\n raise ValueError('No 
deserializer found for {!r}'.format(data))\n return self._serializers[signature].deserialize(data)\n if isinstance(payload, (list, tuple, set)):\n return type(payload)(self.deserialize(x) for x in payload)\n elif isinstance(payload, MutableMapping):\n return type(payload)({self.deserialize(k): self.deserialize(v) for\n k, v in payload.items()})\n else:\n raise NotImplementedError\n\n\[email protected](lambda x: True)\nclass PickleSerializer(BaseSerializer):\n \"\"\"\n Pickle serialization of python objects over the zmq ports.\n \"\"\"\n signature = '_p'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, Exception))\nclass ExceptionSerializer(BaseSerializer):\n \"\"\"\n Exception serialization.\n \"\"\"\n signature = '_e'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, (str, unicode, bytes, int,\n float)))\nclass BasicSerializer(BaseSerializer):\n \"\"\"\n Basic serialization of simple python types.\n \"\"\"\n signature = '_b'\n\n def serialize(self, data):\n return data\n\n def deserialize(self, data):\n return data\n\n\nclass Encoder(object):\n \"\"\"\n Handles how args and kwargs are encoded over zmq ports.\n\n By default zerorpc does not support passing kwargs to remote methods.\n This class is used to fix that so args are kwargs are combined into a\n single args payload that is then deconstructed on the remote side.\n \"\"\"\n _default_serializer = PickleSerializer\n\n def __init__(self, serializer=None):\n if serializer is None:\n serializer = self._default_serializer()\n self.serializer = serializer\n\n def encode(self, *args, **kwargs):\n \"\"\"\n Encode args and kwargs as a single serialized payload.\n\n Parameters\n ----------\n args : *Any\n kwargs : **Any\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n return self.serializer.serialize(args), self.serializer.serialize(\n kwargs)\n\n def decode(self, *payload):\n \"\"\"\n Decode encoded args and kwargs.\n\n Parameters\n ----------\n payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), self.serializer.deserialize(\n kwargs)\n", "step-5": "import abc\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom typing import *\n\n\nT = TypeVar('T')\n\n\nclass BaseSerializer(Generic[T]):\n \"\"\"\n The serializer is responsible for converting complex python data types\n into primitive types that can be sent over zmq ports via msgpack.\n \"\"\"\n # Used within the `MultiSerializer` to embed which serializer to use for\n # round-trip data serialization.\n signature = None # type: str\n\n @abc.abstractmethod\n def serialize(self, data):\n \"\"\"\n Serialize a python object to transport over zmq.\n\n Parameters\n ----------\n data : T\n\n Returns\n -------\n Any\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def deserialize(self, data):\n \"\"\"\n Deserialize a python object. 
Counter of `serialize`.\n\n Parameters\n ----------\n data : Any\n\n Returns\n -------\n T\n \"\"\"\n return NotImplementedError\n\n\nclass MultiSerializer(BaseSerializer):\n \"\"\"\n Serializer with multple sub-serializers that can register methods to claim\n certain python objects.\n\n All serialized objects (besides list, tuples, sets, dicts) are represented\n as a tuple of (serializer.signature, serialized_value). This is so data\n can be properly decoded on the remote side.\n\n Register new sub-serializers using the register decorator:\n\n @MultiSerializer.register(lamba x: isinstance(x, MyCls))\n class MyClsSerializer(BaseSerializer):\n ...\n \"\"\"\n\n _registered = []\n\n @classmethod\n def register(cls, claim_func):\n \"\"\"\n Decorator for registering a callable to serialize certain types.\n\n Parameters\n ----------\n claim_func : Callable[Any, bool]\n\n Returns\n -------\n Callable[[T], T]\n \"\"\"\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco\n\n def __init__(self):\n self._serializers = {} # type: Dict[str, BaseSerializer]\n self._claims = [] # type: List[Tuple[str, Callable[[Any], bool]]]\n for claim_func, serializerCls in self._registered:\n assert serializerCls.signature is not None, \\\n 'Populate the serializer.signature attribute.'\n assert serializerCls.signature not in self._serializers, \\\n 'Existing serializer with signature ' \\\n '{!r}'.format(serializerCls.signature)\n serializer = serializerCls()\n self._claims.append((serializerCls.signature, claim_func))\n self._serializers[serializerCls.signature] = serializer\n\n def serialize(self, data):\n if isinstance(data, (list, tuple, set)):\n return type(data)(self.serialize(x) for x in data)\n elif isinstance(data, MutableMapping):\n return type(data)({self.serialize(k): self.serialize(v)\n for k, v in data.items()})\n for name, claim_func in self._claims:\n if claim_func(data):\n return name, self._serializers[name].serialize(data)\n raise ValueError('No serializer found for {!r}'.format(data))\n\n def deserialize(self, payload):\n if not payload:\n return payload\n if isinstance(payload, (tuple, list)) \\\n and len(payload) == 2 \\\n and payload[0] in self._serializers.keys():\n signature, data = payload\n if signature not in self._serializers:\n raise ValueError('No deserializer found for {!r}'.format(data))\n return self._serializers[signature].deserialize(data)\n if isinstance(payload, (list, tuple, set)):\n return type(payload)(self.deserialize(x) for x in payload)\n elif isinstance(payload, MutableMapping):\n return type(payload)({self.deserialize(k): self.deserialize(v)\n for k, v in payload.items()})\n else:\n raise NotImplementedError\n\n\[email protected](lambda x: True)\nclass PickleSerializer(BaseSerializer):\n \"\"\"\n Pickle serialization of python objects over the zmq ports.\n \"\"\"\n signature = '_p'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](lambda x: isinstance(x, Exception))\nclass ExceptionSerializer(BaseSerializer):\n \"\"\"\n Exception serialization.\n \"\"\"\n signature = '_e'\n\n def serialize(self, data):\n return pickle.dumps(data, -1)\n\n def deserialize(self, data):\n return pickle.loads(data)\n\n\[email protected](\n lambda x: isinstance(x, (str, unicode, bytes, int, float)))\nclass BasicSerializer(BaseSerializer):\n \"\"\"\n Basic serialization of simple python types.\n \"\"\"\n signature = '_b'\n\n def 
serialize(self, data):\n return data\n\n def deserialize(self, data):\n return data\n\n\nclass Encoder(object):\n \"\"\"\n Handles how args and kwargs are encoded over zmq ports.\n\n By default zerorpc does not support passing kwargs to remote methods.\n This class is used to fix that so args are kwargs are combined into a\n single args payload that is then deconstructed on the remote side.\n \"\"\"\n _default_serializer = PickleSerializer\n\n def __init__(self, serializer=None):\n if serializer is None:\n serializer = self._default_serializer()\n self.serializer = serializer\n\n def encode(self, *args, **kwargs):\n \"\"\"\n Encode args and kwargs as a single serialized payload.\n\n Parameters\n ----------\n args : *Any\n kwargs : **Any\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n return self.serializer.serialize(args), \\\n self.serializer.serialize(kwargs)\n\n def decode(self, *payload):\n \"\"\"\n Decode encoded args and kwargs.\n\n Parameters\n ----------\n payload : Tuple[Tuple[Any, ...], Dict[Any, Any]]\n\n Returns\n -------\n Tuple[Tuple[Any, ...], Dict[Any, Any]]\n \"\"\"\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), \\\n self.serializer.deserialize(kwargs)\n", "step-ids": [ 18, 26, 32, 33, 37 ] }
[ 18, 26, 32, 33, 37 ]
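For readers skimming the serializer sample above, here is a minimal, illustrative round trip through its Encoder and MultiSerializer. It assumes those classes are in scope exactly as defined above (the sample itself targets Python 2, since the BasicSerializer claim references `unicode`); the argument values are arbitrary.

# Illustrative sketch only: Encoder / MultiSerializer come from the sample above.
encoder = Encoder(serializer=MultiSerializer())

# args and kwargs are folded into a single serializable payload...
payload = encoder.encode(1, 2.5, name="demo")

# ...and unpacked again on the receiving side.
args, kwargs = encoder.decode(*payload)
# args -> (1, 2.5), kwargs -> {'name': 'demo'}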
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with con: cur = con.cursor() cur.execute('DROP TABLE IF EXISTS log') cur.execute( 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)' ) <|reserved_special_token_1|> <|reserved_special_token_0|> con = lite.connect('./logs.db') with con: cur = con.cursor() cur.execute('DROP TABLE IF EXISTS log') cur.execute( 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)' ) <|reserved_special_token_1|> import sqlite3 as lite con = lite.connect('./logs.db') with con: cur = con.cursor() cur.execute('DROP TABLE IF EXISTS log') cur.execute( 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)' ) <|reserved_special_token_1|> #!/usr/bin/python # -*- coding: utf-8 -*- import sqlite3 as lite con = lite.connect('./logs.db') with con: cur = con.cursor() cur.execute("DROP TABLE IF EXISTS log") cur.execute('''CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)''')
flexible
{ "blob_id": "1c31649ac75214a6d26bcb6d6822579be91e5074", "index": 2748, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n", "step-3": "<mask token>\ncon = lite.connect('./logs.db')\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n", "step-4": "import sqlite3 as lite\ncon = lite.connect('./logs.db')\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n", "step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sqlite3 as lite\n\ncon = lite.connect('./logs.db')\n\nwith con: \n cur = con.cursor() \n cur.execute(\"DROP TABLE IF EXISTS log\")\n cur.execute('''CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)''')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
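The sqlite sample above only creates the `log` table. As a hedged illustration (assuming the same `./logs.db` file and the column order declared above), a row could be added with parameterized placeholders rather than string formatting:

import sqlite3 as lite

con = lite.connect('./logs.db')
with con:
    cur = con.cursor()
    # One placeholder per column of the log table defined above.
    cur.execute(
        "INSERT INTO log VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
        ("1", "42", "alice", "Alice", "Smith", "hello", "100", "2021-01-01"),
    )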
'''
Function Description

Complete the extraLongFactorials function in the editor below. It should print the result and return.

extraLongFactorials has the following parameter(s):

    n: an integer

Note: Factorials of n can't be stored even in a long long variable. Big integers must be used for such calculations. Languages like Java, Python, Ruby etc. can handle big integers, but we need to write additional code in C/C++ to handle huge values.

We recommend solving this challenge using BigIntegers.

Input Format

Input consists of a single integer n.

Output Format

Print the factorial of n.
'''

#!/bin/python3

import math
import os
import random
import re
import sys

def extraLongFactorials(n):
    print(math.factorial(n))

if __name__ == '__main__':
    n = int(input())

    extraLongFactorials(n)
normal
{ "blob_id": "5c1ce46f45da33acf75a7f47add811b14d58414d", "index": 1169, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\nif __name__ == '__main__':\n n = int(input())\n extraLongFactorials(n)\n", "step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\nif __name__ == '__main__':\n n = int(input())\n extraLongFactorials(n)\n", "step-5": "'''\r\nFunction Description\r\n\r\nComplete the extraLongFactorials function in the editor below. It should print the result and return.\r\n\r\nextraLongFactorials has the following parameter(s):\r\n\r\n n: an integer\r\n\r\nNote: Factorials of\r\ncan't be stored even in a\r\n\r\nlong long variable. Big integers must be used for such calculations. Languages like Java, Python, Ruby etc. can handle big integers, but we need to write additional code in C/C++ to handle huge values.\r\n\r\nWe recommend solving this challenge using BigIntegers.\r\n\r\nInput Format\r\n\r\nInput consists of a single integer \r\nOutput Format\r\n\r\nPrint the factorial of. \r\n'''\n \r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\ndef extraLongFactorials(n):\r\n print(math.factorial(n))\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n extraLongFactorials(n)\r\n \n \n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
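The problem statement above notes that factorials overflow fixed-width integer types. A quick check (the values here are chosen only for illustration) shows why Python's arbitrary-precision integers make the one-line solution sufficient:

import math

print(math.factorial(25))   # 15511210043330985984000000
print(2 ** 63 - 1)          # 9223372036854775807, the largest signed 64-bit value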
file = open("yo.txt", "w")

file.write("Yo")
normal
{ "blob_id": "207b6e56b683c0b069c531a4c6076c2822814390", "index": 512, "step-1": "<mask token>\n", "step-2": "<mask token>\nfile.write('Yo')\n", "step-3": "file = open('yo.txt', 'wr')\nfile.write('Yo')\n", "step-4": "file = open(\"yo.txt\", \"wr\")\n\nfile.write(\"Yo\")\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
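As a small aside on the file-writing snippet above: it never closes the file object. A hedged, equivalent sketch using the context-manager idiom (same file name and content) would be:

with open("yo.txt", "w") as f:
    f.write("Yo")

# Reading it back confirms the write.
with open("yo.txt") as f:
    assert f.read() == "Yo"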
""" Schema management for various object types (publisher, dataset etc). Loads the jsonschema and allows callers to validate a dictionary against them. """ import os import json import pubtool.lib.validators as v from jsonschema import validate, validators from jsonschema.exceptions import ValidationError SCHEMA = { "publisher": None, "dataset": None } class ObjectValidationErrors(Exception): def __init__(self, errors): self.errors = errors def _get_directory(): p = os.path.dirname(__file__) p = os.path.join(p, os.pardir, os.pardir, "schema") p = os.path.abspath(p) return p def _get_schema(name): """ Load, if necessary, the schema for the specific name and return it """ global SCHEMA loaded_schema = SCHEMA.get(name) if not loaded_schema: filename = "{}/{}.json".format(_get_directory(), name) if os.path.exists(filename): SCHEMA[name] = json.load(open(filename, 'r')) return SCHEMA.get(name) def validation_check(object_type, data): from jsonschema import Draft4Validator schema = _get_schema(object_type) if not schema: # raise ValidationError, not Exception raise Exception() new_validators = v.load_validators() custom_validator = validators.extend( Draft4Validator, validators=new_validators ) validator = custom_validator(schema) errors = sorted(validator.iter_errors(data), key=lambda e: e.path) errors = [v.message for v in errors] return errors
normal
{ "blob_id": "c4f39f9212fbe0f591543d143cb8f1721c1f8e1e", "index": 7056, "step-1": "<mask token>\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n", "step-3": "<mask token>\nSCHEMA = {'publisher': None, 'dataset': None}\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n", "step-4": "<mask token>\nimport os\nimport json\nimport pubtool.lib.validators as v\nfrom jsonschema import validate, validators\nfrom jsonschema.exceptions import ValidationError\nSCHEMA = {'publisher': None, 'dataset': None}\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise 
Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n", "step-5": "\"\"\"\nSchema management for various object types (publisher, dataset etc). Loads\nthe jsonschema and allows callers to validate a dictionary against them.\n\"\"\"\nimport os\nimport json\n\nimport pubtool.lib.validators as v\n\nfrom jsonschema import validate, validators\nfrom jsonschema.exceptions import ValidationError\n\nSCHEMA = {\n \"publisher\": None,\n \"dataset\": None\n}\n\nclass ObjectValidationErrors(Exception):\n def __init__(self, errors):\n self.errors = errors\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, \"schema\")\n p = os.path.abspath(p)\n return p\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n\n schema = _get_schema(object_type)\n if not schema:\n # raise ValidationError, not Exception\n raise Exception()\n\n new_validators = v.load_validators()\n\n custom_validator = validators.extend(\n Draft4Validator,\n validators=new_validators\n )\n validator = custom_validator(schema)\n\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n\n return errors", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
<|reserved_special_token_0|> class ArgParser: <|reserved_special_token_0|> def add_parser_argument(self, parser, option_name, options): params = self.prepare_params(options) alias = params.pop('alias', None) positional = params.pop('positional', False) param_name = '--{}'.format(option_name) if positional: parser.add_argument(option_name, **params) elif alias is None: parser.add_argument(param_name, **params) else: parser.add_argument(param_name, '-{}'.format(alias), **params) @staticmethod def check_args(args): for element in args: if element.startswith('--') and '_' in element: raise ValueError('Wrong "{}" option provided.\n'.format( element) + """Arguments containing '_' are not allowed. """ + "Use '-' instead\n") def read_configuration(self): self.parse_conf = load_yaml_file('argparser.yaml', path=os.path. dirname(os.path.realpath(__file__))) try: pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir, is_optional=True) if len(pinit_conf) < 1: pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os. curdir, is_optional=True) except AttributeError as e: log.exit(e) self.host_configuration = pinit_conf.pop('project_configuration', {}) for key, value in pinit_conf.items(): if value is None: continue if not isinstance(value, dict): if key in self.parse_conf['options']: self.parse_conf['options'][key]['default'] = value else: print('\nUnknown parameter {} found in {}\n'.format(key, PROJECTRC)) elif key not in self.parse_conf['subcommands']: print('\nUnknown command {} found in {}\n'.format(key, PROJECTRC)) else: conf = self.parse_conf['subcommands'][key]['suboptions'] for subkey, subvalue in value.items(): if subkey in conf: conf[subkey]['default'] = subvalue else: print('Unknown parameter {}/{} found in {}\n'. format(key, subkey, PROJECTRC)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class ArgParser: <|reserved_special_token_0|> def add_parser_argument(self, parser, option_name, options): params = self.prepare_params(options) alias = params.pop('alias', None) positional = params.pop('positional', False) param_name = '--{}'.format(option_name) if positional: parser.add_argument(option_name, **params) elif alias is None: parser.add_argument(param_name, **params) else: parser.add_argument(param_name, '-{}'.format(alias), **params) @staticmethod def check_args(args): for element in args: if element.startswith('--') and '_' in element: raise ValueError('Wrong "{}" option provided.\n'.format( element) + """Arguments containing '_' are not allowed. """ + "Use '-' instead\n") def read_configuration(self): self.parse_conf = load_yaml_file('argparser.yaml', path=os.path. dirname(os.path.realpath(__file__))) try: pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir, is_optional=True) if len(pinit_conf) < 1: pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os. 
curdir, is_optional=True) except AttributeError as e: log.exit(e) self.host_configuration = pinit_conf.pop('project_configuration', {}) for key, value in pinit_conf.items(): if value is None: continue if not isinstance(value, dict): if key in self.parse_conf['options']: self.parse_conf['options'][key]['default'] = value else: print('\nUnknown parameter {} found in {}\n'.format(key, PROJECTRC)) elif key not in self.parse_conf['subcommands']: print('\nUnknown command {} found in {}\n'.format(key, PROJECTRC)) else: conf = self.parse_conf['subcommands'][key]['suboptions'] for subkey, subvalue in value.items(): if subkey in conf: conf[subkey]['default'] = subvalue else: print('Unknown parameter {}/{} found in {}\n'. format(key, subkey, PROJECTRC)) @staticmethod def prepare_params(options): pconf = {} default = options.get('default') pconf['default'] = default myhelp = '{} [default: {}]'.format(options.get('help'), default) pconf['help'] = myhelp if options.get('type') == 'bool': if default: pconf['action'] = 'store_false' else: pconf['action'] = 'store_true' else: pconf['type'] = str pconf['metavar'] = options.get('metavalue') if 'alias' in options: pconf['alias'] = options['alias'] if 'positional' in options: pconf['positional'] = options['positional'] return pconf <|reserved_special_token_1|> <|reserved_special_token_0|> class ArgParser: def __init__(self, args=None): if args is None: args = sys.argv self.current_args = {} self.host_configuration = {} self.check_args(args) self.read_configuration() parser = argparse.ArgumentParser(prog=args[0], description=self. parse_conf.get('description')) sorted_options = sorted(self.parse_conf.get('options', {}).items()) for option_name, options in sorted_options: self.add_parser_argument(parser, option_name, options) version_string = 'rapydo version {}'.format(__version__) parser.add_argument('--version', action='version', version= version_string) main_command = self.parse_conf.get('action') subparsers = parser.add_subparsers(title='Available commands', dest =main_command.get('name'), help=main_command.get('help')) subparsers.required = True mycommands = self.parse_conf.get('subcommands', {}) for command_name, options in sorted(mycommands.items()): subparse = subparsers.add_parser(command_name, help=options.get ('description')) suboptions = options.get('suboptions', {}).items() for option_name, suboptions in suboptions: self.add_parser_argument(subparse, option_name, suboptions) if len(args) == 1: parser.print_help() sys.exit(1) current_args_namespace, self.remaining_args = parser.parse_known_args( args[1:]) self.current_args = vars(current_args_namespace) self.extra_parser = argparse.ArgumentParser(description= 'Custom rapydo commands from your own configuration', add_help= False, usage=""" $ rapydo custom CUSTOM_COMMAND""") self.extra_command_parser = self.extra_parser.add_subparsers(title= 'Available custom commands', dest='custom', help= 'list of custom commands') self.extra_command_parser.required = True if self.current_args.get('log_level', 'DEPRECATED') != 'DEPRECATED': log.warning( '--log-level parameter is deprecated, set env variable LOGURU_LEVEL' ) log.verbose('Parsed arguments: {}', self.current_args) def add_parser_argument(self, parser, option_name, options): params = self.prepare_params(options) alias = params.pop('alias', None) positional = params.pop('positional', False) param_name = '--{}'.format(option_name) if positional: parser.add_argument(option_name, **params) elif alias is None: parser.add_argument(param_name, **params) else: 
parser.add_argument(param_name, '-{}'.format(alias), **params) @staticmethod def check_args(args): for element in args: if element.startswith('--') and '_' in element: raise ValueError('Wrong "{}" option provided.\n'.format( element) + """Arguments containing '_' are not allowed. """ + "Use '-' instead\n") def read_configuration(self): self.parse_conf = load_yaml_file('argparser.yaml', path=os.path. dirname(os.path.realpath(__file__))) try: pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir, is_optional=True) if len(pinit_conf) < 1: pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os. curdir, is_optional=True) except AttributeError as e: log.exit(e) self.host_configuration = pinit_conf.pop('project_configuration', {}) for key, value in pinit_conf.items(): if value is None: continue if not isinstance(value, dict): if key in self.parse_conf['options']: self.parse_conf['options'][key]['default'] = value else: print('\nUnknown parameter {} found in {}\n'.format(key, PROJECTRC)) elif key not in self.parse_conf['subcommands']: print('\nUnknown command {} found in {}\n'.format(key, PROJECTRC)) else: conf = self.parse_conf['subcommands'][key]['suboptions'] for subkey, subvalue in value.items(): if subkey in conf: conf[subkey]['default'] = subvalue else: print('Unknown parameter {}/{} found in {}\n'. format(key, subkey, PROJECTRC)) @staticmethod def prepare_params(options): pconf = {} default = options.get('default') pconf['default'] = default myhelp = '{} [default: {}]'.format(options.get('help'), default) pconf['help'] = myhelp if options.get('type') == 'bool': if default: pconf['action'] = 'store_false' else: pconf['action'] = 'store_true' else: pconf['type'] = str pconf['metavar'] = options.get('metavalue') if 'alias' in options: pconf['alias'] = options['alias'] if 'positional' in options: pconf['positional'] = options['positional'] return pconf <|reserved_special_token_1|> <|reserved_special_token_0|> import os import sys import argparse from controller import __version__, PROJECTRC, PROJECTRC_ALTERNATIVE from controller.conf_utilities import load_yaml_file from controller import log class ArgParser: def __init__(self, args=None): if args is None: args = sys.argv self.current_args = {} self.host_configuration = {} self.check_args(args) self.read_configuration() parser = argparse.ArgumentParser(prog=args[0], description=self. 
parse_conf.get('description')) sorted_options = sorted(self.parse_conf.get('options', {}).items()) for option_name, options in sorted_options: self.add_parser_argument(parser, option_name, options) version_string = 'rapydo version {}'.format(__version__) parser.add_argument('--version', action='version', version= version_string) main_command = self.parse_conf.get('action') subparsers = parser.add_subparsers(title='Available commands', dest =main_command.get('name'), help=main_command.get('help')) subparsers.required = True mycommands = self.parse_conf.get('subcommands', {}) for command_name, options in sorted(mycommands.items()): subparse = subparsers.add_parser(command_name, help=options.get ('description')) suboptions = options.get('suboptions', {}).items() for option_name, suboptions in suboptions: self.add_parser_argument(subparse, option_name, suboptions) if len(args) == 1: parser.print_help() sys.exit(1) current_args_namespace, self.remaining_args = parser.parse_known_args( args[1:]) self.current_args = vars(current_args_namespace) self.extra_parser = argparse.ArgumentParser(description= 'Custom rapydo commands from your own configuration', add_help= False, usage=""" $ rapydo custom CUSTOM_COMMAND""") self.extra_command_parser = self.extra_parser.add_subparsers(title= 'Available custom commands', dest='custom', help= 'list of custom commands') self.extra_command_parser.required = True if self.current_args.get('log_level', 'DEPRECATED') != 'DEPRECATED': log.warning( '--log-level parameter is deprecated, set env variable LOGURU_LEVEL' ) log.verbose('Parsed arguments: {}', self.current_args) def add_parser_argument(self, parser, option_name, options): params = self.prepare_params(options) alias = params.pop('alias', None) positional = params.pop('positional', False) param_name = '--{}'.format(option_name) if positional: parser.add_argument(option_name, **params) elif alias is None: parser.add_argument(param_name, **params) else: parser.add_argument(param_name, '-{}'.format(alias), **params) @staticmethod def check_args(args): for element in args: if element.startswith('--') and '_' in element: raise ValueError('Wrong "{}" option provided.\n'.format( element) + """Arguments containing '_' are not allowed. """ + "Use '-' instead\n") def read_configuration(self): self.parse_conf = load_yaml_file('argparser.yaml', path=os.path. dirname(os.path.realpath(__file__))) try: pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir, is_optional=True) if len(pinit_conf) < 1: pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os. curdir, is_optional=True) except AttributeError as e: log.exit(e) self.host_configuration = pinit_conf.pop('project_configuration', {}) for key, value in pinit_conf.items(): if value is None: continue if not isinstance(value, dict): if key in self.parse_conf['options']: self.parse_conf['options'][key]['default'] = value else: print('\nUnknown parameter {} found in {}\n'.format(key, PROJECTRC)) elif key not in self.parse_conf['subcommands']: print('\nUnknown command {} found in {}\n'.format(key, PROJECTRC)) else: conf = self.parse_conf['subcommands'][key]['suboptions'] for subkey, subvalue in value.items(): if subkey in conf: conf[subkey]['default'] = subvalue else: print('Unknown parameter {}/{} found in {}\n'. 
format(key, subkey, PROJECTRC)) @staticmethod def prepare_params(options): pconf = {} default = options.get('default') pconf['default'] = default myhelp = '{} [default: {}]'.format(options.get('help'), default) pconf['help'] = myhelp if options.get('type') == 'bool': if default: pconf['action'] = 'store_false' else: pconf['action'] = 'store_true' else: pconf['type'] = str pconf['metavar'] = options.get('metavalue') if 'alias' in options: pconf['alias'] = options['alias'] if 'positional' in options: pconf['positional'] = options['positional'] return pconf <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Automatically create and parse commands based on a YAML configuration file. NOTE: we can't have a logger here, before knowing the level of debug. """ import os import sys import argparse from controller import __version__, PROJECTRC, PROJECTRC_ALTERNATIVE from controller.conf_utilities import load_yaml_file from controller import log class ArgParser: def __init__(self, args=None): if args is None: args = sys.argv self.current_args = {} self.host_configuration = {} # This method can raise ValueErrors self.check_args(args) # This method saves configuration objects in self self.read_configuration() # Arguments definition parser = argparse.ArgumentParser( prog=args[0], description=self.parse_conf.get('description') ) # PARAMETERS sorted_options = sorted(self.parse_conf.get('options', {}).items()) for option_name, options in sorted_options: self.add_parser_argument(parser, option_name, options) version_string = 'rapydo version {}'.format(__version__) parser.add_argument('--version', action='version', version=version_string) # Sub-parser of commands [check, init, etc] main_command = self.parse_conf.get('action') subparsers = parser.add_subparsers( title='Available commands', dest=main_command.get('name'), help=main_command.get('help'), ) subparsers.required = True # ########################## # COMMANDS # BASE normal commands mycommands = self.parse_conf.get('subcommands', {}) for command_name, options in sorted(mycommands.items()): # Creating a parser for each sub-command [check, init, etc] subparse = subparsers.add_parser( command_name, help=options.get('description') ) # controlcommands = options.get('controlcommands', {}) # # Some subcommands can have further subcommands # [control start, stop, etc] # if len(controlcommands) > 0: # innerparser = subparse.add_subparsers( # dest='controlcommand' # ) # innerparser.required = options.get('controlrequired', False) # for subcommand, suboptions in controlcommands.items(): # subcommand_help = suboptions.pop(0) # # Creating a parser for each sub-sub-command # # [control start/stop] # innerparser.add_parser(subcommand, help=subcommand_help) suboptions = options.get('suboptions', {}).items() for option_name, suboptions in suboptions: self.add_parser_argument(subparse, option_name, suboptions) # ########################## # Print usage if no arguments provided if len(args) == 1: parser.print_help() sys.exit(1) # ########################## # Reading input parameters # Partial parsing # https://docs.python.org/3.4/library/argparse.html#partial-parsing # Example # https://gist.github.com/von/949337/ # self.current_args = parser.parse_args() current_args_namespace, self.remaining_args = parser.parse_known_args(args[1:]) self.current_args = vars(current_args_namespace) # custom commands as a separate parser self.extra_parser = argparse.ArgumentParser( description='Custom rapydo commands from your own configuration', add_help=False, usage='\n$ rapydo 
custom CUSTOM_COMMAND', ) self.extra_command_parser = self.extra_parser.add_subparsers( title='Available custom commands', dest='custom', help='list of custom commands', ) self.extra_command_parser.required = True # ########################## if self.current_args.get("log_level", "DEPRECATED") != "DEPRECATED": # Deprecated since version 0.7.0 log.warning( "--log-level parameter is deprecated, set env variable LOGURU_LEVEL") log.verbose("Parsed arguments: {}", self.current_args) def add_parser_argument(self, parser, option_name, options): params = self.prepare_params(options) alias = params.pop('alias', None) positional = params.pop('positional', False) param_name = '--{}'.format(option_name) if positional: parser.add_argument(option_name, **params) elif alias is None: parser.add_argument(param_name, **params) else: parser.add_argument(param_name, '-{}'.format(alias), **params) @staticmethod def check_args(args): # Check on format for element in args: if element.startswith('--') and '_' in element: raise ValueError( "Wrong \"{}\" option provided.\n".format(element) + "Arguments containing '_' are not allowed.\n" + "Use '-' instead\n" ) # NOTE: the standard is to use only '-' separators for arguments # beware: argparse converts them into '_' when you want to retrieve def read_configuration(self): # READ MAIN FILE WITH COMMANDS AND OPTIONS self.parse_conf = load_yaml_file( 'argparser.yaml', path=os.path.dirname(os.path.realpath(__file__)) ) try: # READ PROJECT INIT FILE: .projectrc pinit_conf = load_yaml_file( PROJECTRC, path=os.curdir, is_optional=True) # Allow alternative for PROJECT INIT FILE: .project.yml if len(pinit_conf) < 1: pinit_conf = load_yaml_file( PROJECTRC_ALTERNATIVE, path=os.curdir, is_optional=True) except AttributeError as e: log.exit(e) self.host_configuration = pinit_conf.pop('project_configuration', {}) # Mix with parse_conf for key, value in pinit_conf.items(): # value = pinit_conf.get(key, None) if value is None: continue if not isinstance(value, dict): # This is a first level option if key in self.parse_conf['options']: self.parse_conf['options'][key]['default'] = value else: print("\nUnknown parameter {} found in {}\n".format(key, PROJECTRC)) else: # This is a second level parameter if key not in self.parse_conf['subcommands']: print("\nUnknown command {} found in {}\n".format(key, PROJECTRC)) else: conf = self.parse_conf['subcommands'][key]['suboptions'] for subkey, subvalue in value.items(): if subkey in conf: conf[subkey]['default'] = subvalue else: print("Unknown parameter {}/{} found in {}\n".format( key, subkey, PROJECTRC)) @staticmethod def prepare_params(options): pconf = {} default = options.get('default') pconf['default'] = default myhelp = "{} [default: {}]".format(options.get('help'), default) pconf['help'] = myhelp if options.get('type') == 'bool': if default: pconf['action'] = 'store_false' else: pconf['action'] = 'store_true' else: # type and metavar are allowed for bool pconf['type'] = str pconf['metavar'] = options.get('metavalue') if 'alias' in options: pconf['alias'] = options['alias'] if 'positional' in options: pconf['positional'] = options['positional'] return pconf
flexible
{ "blob_id": "94559d9fd296acd468c33d6b0541b974575b8852", "index": 4119, "step-1": "<mask token>\n\n\nclass ArgParser:\n <mask token>\n\n def add_parser_argument(self, parser, option_name, options):\n params = self.prepare_params(options)\n alias = params.pop('alias', None)\n positional = params.pop('positional', False)\n param_name = '--{}'.format(option_name)\n if positional:\n parser.add_argument(option_name, **params)\n elif alias is None:\n parser.add_argument(param_name, **params)\n else:\n parser.add_argument(param_name, '-{}'.format(alias), **params)\n\n @staticmethod\n def check_args(args):\n for element in args:\n if element.startswith('--') and '_' in element:\n raise ValueError('Wrong \"{}\" option provided.\\n'.format(\n element) +\n \"\"\"Arguments containing '_' are not allowed.\n\"\"\" +\n \"Use '-' instead\\n\")\n\n def read_configuration(self):\n self.parse_conf = load_yaml_file('argparser.yaml', path=os.path.\n dirname(os.path.realpath(__file__)))\n try:\n pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir,\n is_optional=True)\n if len(pinit_conf) < 1:\n pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os.\n curdir, is_optional=True)\n except AttributeError as e:\n log.exit(e)\n self.host_configuration = pinit_conf.pop('project_configuration', {})\n for key, value in pinit_conf.items():\n if value is None:\n continue\n if not isinstance(value, dict):\n if key in self.parse_conf['options']:\n self.parse_conf['options'][key]['default'] = value\n else:\n print('\\nUnknown parameter {} found in {}\\n'.format(key,\n PROJECTRC))\n elif key not in self.parse_conf['subcommands']:\n print('\\nUnknown command {} found in {}\\n'.format(key,\n PROJECTRC))\n else:\n conf = self.parse_conf['subcommands'][key]['suboptions']\n for subkey, subvalue in value.items():\n if subkey in conf:\n conf[subkey]['default'] = subvalue\n else:\n print('Unknown parameter {}/{} found in {}\\n'.\n format(key, subkey, PROJECTRC))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ArgParser:\n <mask token>\n\n def add_parser_argument(self, parser, option_name, options):\n params = self.prepare_params(options)\n alias = params.pop('alias', None)\n positional = params.pop('positional', False)\n param_name = '--{}'.format(option_name)\n if positional:\n parser.add_argument(option_name, **params)\n elif alias is None:\n parser.add_argument(param_name, **params)\n else:\n parser.add_argument(param_name, '-{}'.format(alias), **params)\n\n @staticmethod\n def check_args(args):\n for element in args:\n if element.startswith('--') and '_' in element:\n raise ValueError('Wrong \"{}\" option provided.\\n'.format(\n element) +\n \"\"\"Arguments containing '_' are not allowed.\n\"\"\" +\n \"Use '-' instead\\n\")\n\n def read_configuration(self):\n self.parse_conf = load_yaml_file('argparser.yaml', path=os.path.\n dirname(os.path.realpath(__file__)))\n try:\n pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir,\n is_optional=True)\n if len(pinit_conf) < 1:\n pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os.\n curdir, is_optional=True)\n except AttributeError as e:\n log.exit(e)\n self.host_configuration = pinit_conf.pop('project_configuration', {})\n for key, value in pinit_conf.items():\n if value is None:\n continue\n if not isinstance(value, dict):\n if key in self.parse_conf['options']:\n self.parse_conf['options'][key]['default'] = value\n else:\n print('\\nUnknown parameter {} found in {}\\n'.format(key,\n PROJECTRC))\n elif key not in self.parse_conf['subcommands']:\n print('\\nUnknown 
command {} found in {}\\n'.format(key,\n PROJECTRC))\n else:\n conf = self.parse_conf['subcommands'][key]['suboptions']\n for subkey, subvalue in value.items():\n if subkey in conf:\n conf[subkey]['default'] = subvalue\n else:\n print('Unknown parameter {}/{} found in {}\\n'.\n format(key, subkey, PROJECTRC))\n\n @staticmethod\n def prepare_params(options):\n pconf = {}\n default = options.get('default')\n pconf['default'] = default\n myhelp = '{} [default: {}]'.format(options.get('help'), default)\n pconf['help'] = myhelp\n if options.get('type') == 'bool':\n if default:\n pconf['action'] = 'store_false'\n else:\n pconf['action'] = 'store_true'\n else:\n pconf['type'] = str\n pconf['metavar'] = options.get('metavalue')\n if 'alias' in options:\n pconf['alias'] = options['alias']\n if 'positional' in options:\n pconf['positional'] = options['positional']\n return pconf\n", "step-3": "<mask token>\n\n\nclass ArgParser:\n\n def __init__(self, args=None):\n if args is None:\n args = sys.argv\n self.current_args = {}\n self.host_configuration = {}\n self.check_args(args)\n self.read_configuration()\n parser = argparse.ArgumentParser(prog=args[0], description=self.\n parse_conf.get('description'))\n sorted_options = sorted(self.parse_conf.get('options', {}).items())\n for option_name, options in sorted_options:\n self.add_parser_argument(parser, option_name, options)\n version_string = 'rapydo version {}'.format(__version__)\n parser.add_argument('--version', action='version', version=\n version_string)\n main_command = self.parse_conf.get('action')\n subparsers = parser.add_subparsers(title='Available commands', dest\n =main_command.get('name'), help=main_command.get('help'))\n subparsers.required = True\n mycommands = self.parse_conf.get('subcommands', {})\n for command_name, options in sorted(mycommands.items()):\n subparse = subparsers.add_parser(command_name, help=options.get\n ('description'))\n suboptions = options.get('suboptions', {}).items()\n for option_name, suboptions in suboptions:\n self.add_parser_argument(subparse, option_name, suboptions)\n if len(args) == 1:\n parser.print_help()\n sys.exit(1)\n current_args_namespace, self.remaining_args = parser.parse_known_args(\n args[1:])\n self.current_args = vars(current_args_namespace)\n self.extra_parser = argparse.ArgumentParser(description=\n 'Custom rapydo commands from your own configuration', add_help=\n False, usage=\"\"\"\n$ rapydo custom CUSTOM_COMMAND\"\"\")\n self.extra_command_parser = self.extra_parser.add_subparsers(title=\n 'Available custom commands', dest='custom', help=\n 'list of custom commands')\n self.extra_command_parser.required = True\n if self.current_args.get('log_level', 'DEPRECATED') != 'DEPRECATED':\n log.warning(\n '--log-level parameter is deprecated, set env variable LOGURU_LEVEL'\n )\n log.verbose('Parsed arguments: {}', self.current_args)\n\n def add_parser_argument(self, parser, option_name, options):\n params = self.prepare_params(options)\n alias = params.pop('alias', None)\n positional = params.pop('positional', False)\n param_name = '--{}'.format(option_name)\n if positional:\n parser.add_argument(option_name, **params)\n elif alias is None:\n parser.add_argument(param_name, **params)\n else:\n parser.add_argument(param_name, '-{}'.format(alias), **params)\n\n @staticmethod\n def check_args(args):\n for element in args:\n if element.startswith('--') and '_' in element:\n raise ValueError('Wrong \"{}\" option provided.\\n'.format(\n element) +\n \"\"\"Arguments containing '_' are not 
allowed.\n\"\"\" +\n \"Use '-' instead\\n\")\n\n def read_configuration(self):\n self.parse_conf = load_yaml_file('argparser.yaml', path=os.path.\n dirname(os.path.realpath(__file__)))\n try:\n pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir,\n is_optional=True)\n if len(pinit_conf) < 1:\n pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os.\n curdir, is_optional=True)\n except AttributeError as e:\n log.exit(e)\n self.host_configuration = pinit_conf.pop('project_configuration', {})\n for key, value in pinit_conf.items():\n if value is None:\n continue\n if not isinstance(value, dict):\n if key in self.parse_conf['options']:\n self.parse_conf['options'][key]['default'] = value\n else:\n print('\\nUnknown parameter {} found in {}\\n'.format(key,\n PROJECTRC))\n elif key not in self.parse_conf['subcommands']:\n print('\\nUnknown command {} found in {}\\n'.format(key,\n PROJECTRC))\n else:\n conf = self.parse_conf['subcommands'][key]['suboptions']\n for subkey, subvalue in value.items():\n if subkey in conf:\n conf[subkey]['default'] = subvalue\n else:\n print('Unknown parameter {}/{} found in {}\\n'.\n format(key, subkey, PROJECTRC))\n\n @staticmethod\n def prepare_params(options):\n pconf = {}\n default = options.get('default')\n pconf['default'] = default\n myhelp = '{} [default: {}]'.format(options.get('help'), default)\n pconf['help'] = myhelp\n if options.get('type') == 'bool':\n if default:\n pconf['action'] = 'store_false'\n else:\n pconf['action'] = 'store_true'\n else:\n pconf['type'] = str\n pconf['metavar'] = options.get('metavalue')\n if 'alias' in options:\n pconf['alias'] = options['alias']\n if 'positional' in options:\n pconf['positional'] = options['positional']\n return pconf\n", "step-4": "<mask token>\nimport os\nimport sys\nimport argparse\nfrom controller import __version__, PROJECTRC, PROJECTRC_ALTERNATIVE\nfrom controller.conf_utilities import load_yaml_file\nfrom controller import log\n\n\nclass ArgParser:\n\n def __init__(self, args=None):\n if args is None:\n args = sys.argv\n self.current_args = {}\n self.host_configuration = {}\n self.check_args(args)\n self.read_configuration()\n parser = argparse.ArgumentParser(prog=args[0], description=self.\n parse_conf.get('description'))\n sorted_options = sorted(self.parse_conf.get('options', {}).items())\n for option_name, options in sorted_options:\n self.add_parser_argument(parser, option_name, options)\n version_string = 'rapydo version {}'.format(__version__)\n parser.add_argument('--version', action='version', version=\n version_string)\n main_command = self.parse_conf.get('action')\n subparsers = parser.add_subparsers(title='Available commands', dest\n =main_command.get('name'), help=main_command.get('help'))\n subparsers.required = True\n mycommands = self.parse_conf.get('subcommands', {})\n for command_name, options in sorted(mycommands.items()):\n subparse = subparsers.add_parser(command_name, help=options.get\n ('description'))\n suboptions = options.get('suboptions', {}).items()\n for option_name, suboptions in suboptions:\n self.add_parser_argument(subparse, option_name, suboptions)\n if len(args) == 1:\n parser.print_help()\n sys.exit(1)\n current_args_namespace, self.remaining_args = parser.parse_known_args(\n args[1:])\n self.current_args = vars(current_args_namespace)\n self.extra_parser = argparse.ArgumentParser(description=\n 'Custom rapydo commands from your own configuration', add_help=\n False, usage=\"\"\"\n$ rapydo custom CUSTOM_COMMAND\"\"\")\n self.extra_command_parser = 
self.extra_parser.add_subparsers(title=\n 'Available custom commands', dest='custom', help=\n 'list of custom commands')\n self.extra_command_parser.required = True\n if self.current_args.get('log_level', 'DEPRECATED') != 'DEPRECATED':\n log.warning(\n '--log-level parameter is deprecated, set env variable LOGURU_LEVEL'\n )\n log.verbose('Parsed arguments: {}', self.current_args)\n\n def add_parser_argument(self, parser, option_name, options):\n params = self.prepare_params(options)\n alias = params.pop('alias', None)\n positional = params.pop('positional', False)\n param_name = '--{}'.format(option_name)\n if positional:\n parser.add_argument(option_name, **params)\n elif alias is None:\n parser.add_argument(param_name, **params)\n else:\n parser.add_argument(param_name, '-{}'.format(alias), **params)\n\n @staticmethod\n def check_args(args):\n for element in args:\n if element.startswith('--') and '_' in element:\n raise ValueError('Wrong \"{}\" option provided.\\n'.format(\n element) +\n \"\"\"Arguments containing '_' are not allowed.\n\"\"\" +\n \"Use '-' instead\\n\")\n\n def read_configuration(self):\n self.parse_conf = load_yaml_file('argparser.yaml', path=os.path.\n dirname(os.path.realpath(__file__)))\n try:\n pinit_conf = load_yaml_file(PROJECTRC, path=os.curdir,\n is_optional=True)\n if len(pinit_conf) < 1:\n pinit_conf = load_yaml_file(PROJECTRC_ALTERNATIVE, path=os.\n curdir, is_optional=True)\n except AttributeError as e:\n log.exit(e)\n self.host_configuration = pinit_conf.pop('project_configuration', {})\n for key, value in pinit_conf.items():\n if value is None:\n continue\n if not isinstance(value, dict):\n if key in self.parse_conf['options']:\n self.parse_conf['options'][key]['default'] = value\n else:\n print('\\nUnknown parameter {} found in {}\\n'.format(key,\n PROJECTRC))\n elif key not in self.parse_conf['subcommands']:\n print('\\nUnknown command {} found in {}\\n'.format(key,\n PROJECTRC))\n else:\n conf = self.parse_conf['subcommands'][key]['suboptions']\n for subkey, subvalue in value.items():\n if subkey in conf:\n conf[subkey]['default'] = subvalue\n else:\n print('Unknown parameter {}/{} found in {}\\n'.\n format(key, subkey, PROJECTRC))\n\n @staticmethod\n def prepare_params(options):\n pconf = {}\n default = options.get('default')\n pconf['default'] = default\n myhelp = '{} [default: {}]'.format(options.get('help'), default)\n pconf['help'] = myhelp\n if options.get('type') == 'bool':\n if default:\n pconf['action'] = 'store_false'\n else:\n pconf['action'] = 'store_true'\n else:\n pconf['type'] = str\n pconf['metavar'] = options.get('metavalue')\n if 'alias' in options:\n pconf['alias'] = options['alias']\n if 'positional' in options:\n pconf['positional'] = options['positional']\n return pconf\n", "step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAutomatically create and parse commands\nbased on a YAML configuration file.\n\nNOTE: we can't have a logger here,\nbefore knowing the level of debug.\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nfrom controller import __version__, PROJECTRC, PROJECTRC_ALTERNATIVE\nfrom controller.conf_utilities import load_yaml_file\nfrom controller import log\n\n\nclass ArgParser:\n def __init__(self, args=None):\n if args is None:\n args = sys.argv\n\n self.current_args = {}\n self.host_configuration = {}\n # This method can raise ValueErrors\n self.check_args(args)\n\n # This method saves configuration objects in self\n self.read_configuration()\n\n # Arguments definition\n parser = argparse.ArgumentParser(\n prog=args[0], 
description=self.parse_conf.get('description')\n )\n\n # PARAMETERS\n sorted_options = sorted(self.parse_conf.get('options', {}).items())\n for option_name, options in sorted_options:\n self.add_parser_argument(parser, option_name, options)\n\n version_string = 'rapydo version {}'.format(__version__)\n parser.add_argument('--version', action='version', version=version_string)\n # Sub-parser of commands [check, init, etc]\n main_command = self.parse_conf.get('action')\n\n subparsers = parser.add_subparsers(\n title='Available commands',\n dest=main_command.get('name'),\n help=main_command.get('help'),\n )\n\n subparsers.required = True\n\n # ##########################\n # COMMANDS\n\n # BASE normal commands\n mycommands = self.parse_conf.get('subcommands', {})\n\n for command_name, options in sorted(mycommands.items()):\n\n # Creating a parser for each sub-command [check, init, etc]\n subparse = subparsers.add_parser(\n command_name, help=options.get('description')\n )\n\n # controlcommands = options.get('controlcommands', {})\n # # Some subcommands can have further subcommands\n # [control start, stop, etc]\n # if len(controlcommands) > 0:\n # innerparser = subparse.add_subparsers(\n # dest='controlcommand'\n # )\n # innerparser.required = options.get('controlrequired', False)\n # for subcommand, suboptions in controlcommands.items():\n # subcommand_help = suboptions.pop(0)\n # # Creating a parser for each sub-sub-command\n # # [control start/stop]\n # innerparser.add_parser(subcommand, help=subcommand_help)\n\n suboptions = options.get('suboptions', {}).items()\n for option_name, suboptions in suboptions:\n self.add_parser_argument(subparse, option_name, suboptions)\n\n # ##########################\n # Print usage if no arguments provided\n if len(args) == 1:\n parser.print_help()\n sys.exit(1)\n\n # ##########################\n # Reading input parameters\n\n # Partial parsing\n # https://docs.python.org/3.4/library/argparse.html#partial-parsing\n # Example\n # https://gist.github.com/von/949337/\n\n # self.current_args = parser.parse_args()\n current_args_namespace, self.remaining_args = parser.parse_known_args(args[1:])\n self.current_args = vars(current_args_namespace)\n\n # custom commands as a separate parser\n self.extra_parser = argparse.ArgumentParser(\n description='Custom rapydo commands from your own configuration',\n add_help=False,\n usage='\\n$ rapydo custom CUSTOM_COMMAND',\n )\n self.extra_command_parser = self.extra_parser.add_subparsers(\n title='Available custom commands',\n dest='custom',\n help='list of custom commands',\n )\n self.extra_command_parser.required = True\n\n # ##########################\n if self.current_args.get(\"log_level\", \"DEPRECATED\") != \"DEPRECATED\":\n # Deprecated since version 0.7.0\n log.warning(\n \"--log-level parameter is deprecated, set env variable LOGURU_LEVEL\")\n\n log.verbose(\"Parsed arguments: {}\", self.current_args)\n\n def add_parser_argument(self, parser, option_name, options):\n params = self.prepare_params(options)\n alias = params.pop('alias', None)\n positional = params.pop('positional', False)\n param_name = '--{}'.format(option_name)\n if positional:\n parser.add_argument(option_name, **params)\n elif alias is None:\n parser.add_argument(param_name, **params)\n else:\n parser.add_argument(param_name, '-{}'.format(alias), **params)\n\n @staticmethod\n def check_args(args):\n # Check on format\n for element in args:\n if element.startswith('--') and '_' in element:\n raise ValueError(\n \"Wrong \\\"{}\\\" option 
provided.\\n\".format(element)\n + \"Arguments containing '_' are not allowed.\\n\"\n + \"Use '-' instead\\n\"\n )\n # NOTE: the standard is to use only '-' separators for arguments\n # beware: argparse converts them into '_' when you want to retrieve\n\n def read_configuration(self):\n # READ MAIN FILE WITH COMMANDS AND OPTIONS\n\n self.parse_conf = load_yaml_file(\n 'argparser.yaml', path=os.path.dirname(os.path.realpath(__file__))\n )\n\n try:\n # READ PROJECT INIT FILE: .projectrc\n pinit_conf = load_yaml_file(\n PROJECTRC, path=os.curdir, is_optional=True)\n # Allow alternative for PROJECT INIT FILE: .project.yml\n if len(pinit_conf) < 1:\n pinit_conf = load_yaml_file(\n PROJECTRC_ALTERNATIVE, path=os.curdir, is_optional=True)\n except AttributeError as e:\n log.exit(e)\n\n self.host_configuration = pinit_conf.pop('project_configuration', {})\n\n # Mix with parse_conf\n for key, value in pinit_conf.items():\n # value = pinit_conf.get(key, None)\n\n if value is None:\n continue\n\n if not isinstance(value, dict):\n # This is a first level option\n if key in self.parse_conf['options']:\n self.parse_conf['options'][key]['default'] = value\n else:\n print(\"\\nUnknown parameter {} found in {}\\n\".format(key, PROJECTRC))\n else:\n # This is a second level parameter\n if key not in self.parse_conf['subcommands']:\n print(\"\\nUnknown command {} found in {}\\n\".format(key, PROJECTRC))\n else:\n conf = self.parse_conf['subcommands'][key]['suboptions']\n for subkey, subvalue in value.items():\n if subkey in conf:\n conf[subkey]['default'] = subvalue\n else:\n print(\"Unknown parameter {}/{} found in {}\\n\".format(\n key, subkey, PROJECTRC))\n\n @staticmethod\n def prepare_params(options):\n\n pconf = {}\n default = options.get('default')\n pconf['default'] = default\n\n myhelp = \"{} [default: {}]\".format(options.get('help'), default)\n pconf['help'] = myhelp\n\n if options.get('type') == 'bool':\n\n if default:\n pconf['action'] = 'store_false'\n else:\n pconf['action'] = 'store_true'\n\n else:\n # type and metavar are allowed for bool\n pconf['type'] = str\n pconf['metavar'] = options.get('metavalue')\n\n if 'alias' in options:\n pconf['alias'] = options['alias']\n\n if 'positional' in options:\n pconf['positional'] = options['positional']\n\n return pconf\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
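The row above keeps returning to one pattern: read_configuration() loads a .projectrc-style mapping and uses it to overwrite argparse defaults before parsing. A minimal sketch of that idea follows; the option names and the projectrc dict below are invented for illustration and are not taken from the row.

import argparse

# Hypothetical stand-in for a parsed .projectrc: top-level keys override global
# options, nested dicts override per-subcommand defaults.
projectrc = {"log-level": "DEBUG", "init": {"force": True}}

parser = argparse.ArgumentParser(prog="demo")
parser.add_argument("--log-level", default="INFO")
subparsers = parser.add_subparsers(dest="command", required=True)
init = subparsers.add_parser("init")
init.add_argument("--force", action="store_true", default=False)

# Patch defaults from the rc mapping before parsing, mirroring read_configuration():
# flat values go to the main parser, dict values to the matching subcommand.
parser.set_defaults(**{k.replace("-", "_"): v
                       for k, v in projectrc.items() if not isinstance(v, dict)})
init.set_defaults(**projectrc.get("init", {}))

args = parser.parse_args(["init"])
print(args.log_level, args.force)  # -> DEBUG True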
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str, bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch. Tensor, torch.Tensor]]: """ Use the incremental Welford algorithm described in [h1]_ to accumulate the mean and standard deviation over a set of samples. References: ----------- .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance Args: dataset: atoms data set divide_by_atoms: dict from property name to bool: If True, divide property by number of atoms before calculating statistics. atomref: reference values for single atoms to be removed before calculating stats Returns: """ property_names = list(divide_by_atoms.keys()) norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64) count = 0 mean = torch.zeros_like(norm_mask) M2 = torch.zeros_like(norm_mask) for props in tqdm(dataloader): sample_values = [] for p in property_names: val = props[p][None, :] if atomref and p in atomref.keys(): ar = atomref[p] ar = ar[props[structure.Z]] idx_m = props[structure.idx_m] tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device= ar.device) v0 = tmp.index_add(0, idx_m, ar) val -= v0 sample_values.append(val) sample_values = torch.cat(sample_values, dim=0) batch_size = sample_values.shape[1] new_count = count + batch_size norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 - norm_mask[:, None]) sample_values /= norm sample_mean = torch.mean(sample_values, dim=1) sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1) delta = sample_mean - mean mean += delta * batch_size / new_count corr = batch_size * count / new_count M2 += sample_m2 + delta ** 2 * corr count = new_count stddev = torch.sqrt(M2 / count) stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev) } return stats <|reserved_special_token_1|> <|reserved_special_token_0|> __all__ = ['calculate_stats'] def calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str, bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch. Tensor, torch.Tensor]]: """ Use the incremental Welford algorithm described in [h1]_ to accumulate the mean and standard deviation over a set of samples. References: ----------- .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance Args: dataset: atoms data set divide_by_atoms: dict from property name to bool: If True, divide property by number of atoms before calculating statistics. 
atomref: reference values for single atoms to be removed before calculating stats Returns: """ property_names = list(divide_by_atoms.keys()) norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64) count = 0 mean = torch.zeros_like(norm_mask) M2 = torch.zeros_like(norm_mask) for props in tqdm(dataloader): sample_values = [] for p in property_names: val = props[p][None, :] if atomref and p in atomref.keys(): ar = atomref[p] ar = ar[props[structure.Z]] idx_m = props[structure.idx_m] tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device= ar.device) v0 = tmp.index_add(0, idx_m, ar) val -= v0 sample_values.append(val) sample_values = torch.cat(sample_values, dim=0) batch_size = sample_values.shape[1] new_count = count + batch_size norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 - norm_mask[:, None]) sample_values /= norm sample_mean = torch.mean(sample_values, dim=1) sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1) delta = sample_mean - mean mean += delta * batch_size / new_count corr = batch_size * count / new_count M2 += sample_m2 + delta ** 2 * corr count = new_count stddev = torch.sqrt(M2 / count) stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev) } return stats <|reserved_special_token_1|> from typing import Dict, Tuple import torch from tqdm import tqdm import schnetpack.properties as structure from schnetpack.data import AtomsLoader __all__ = ['calculate_stats'] def calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str, bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch. Tensor, torch.Tensor]]: """ Use the incremental Welford algorithm described in [h1]_ to accumulate the mean and standard deviation over a set of samples. References: ----------- .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance Args: dataset: atoms data set divide_by_atoms: dict from property name to bool: If True, divide property by number of atoms before calculating statistics. 
atomref: reference values for single atoms to be removed before calculating stats Returns: """ property_names = list(divide_by_atoms.keys()) norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64) count = 0 mean = torch.zeros_like(norm_mask) M2 = torch.zeros_like(norm_mask) for props in tqdm(dataloader): sample_values = [] for p in property_names: val = props[p][None, :] if atomref and p in atomref.keys(): ar = atomref[p] ar = ar[props[structure.Z]] idx_m = props[structure.idx_m] tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device= ar.device) v0 = tmp.index_add(0, idx_m, ar) val -= v0 sample_values.append(val) sample_values = torch.cat(sample_values, dim=0) batch_size = sample_values.shape[1] new_count = count + batch_size norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 - norm_mask[:, None]) sample_values /= norm sample_mean = torch.mean(sample_values, dim=1) sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1) delta = sample_mean - mean mean += delta * batch_size / new_count corr = batch_size * count / new_count M2 += sample_m2 + delta ** 2 * corr count = new_count stddev = torch.sqrt(M2 / count) stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev) } return stats <|reserved_special_token_1|> from typing import Dict, Tuple import torch from tqdm import tqdm import schnetpack.properties as structure from schnetpack.data import AtomsLoader __all__ = ["calculate_stats"] def calculate_stats( dataloader: AtomsLoader, divide_by_atoms: Dict[str, bool], atomref: Dict[str, torch.Tensor] = None, ) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]: """ Use the incremental Welford algorithm described in [h1]_ to accumulate the mean and standard deviation over a set of samples. References: ----------- .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance Args: dataset: atoms data set divide_by_atoms: dict from property name to bool: If True, divide property by number of atoms before calculating statistics. atomref: reference values for single atoms to be removed before calculating stats Returns: """ property_names = list(divide_by_atoms.keys()) norm_mask = torch.tensor( [float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64 ) count = 0 mean = torch.zeros_like(norm_mask) M2 = torch.zeros_like(norm_mask) for props in tqdm(dataloader): sample_values = [] for p in property_names: val = props[p][None, :] if atomref and p in atomref.keys(): ar = atomref[p] ar = ar[props[structure.Z]] idx_m = props[structure.idx_m] tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=ar.device) v0 = tmp.index_add(0, idx_m, ar) val -= v0 sample_values.append(val) sample_values = torch.cat(sample_values, dim=0) batch_size = sample_values.shape[1] new_count = count + batch_size norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + ( 1 - norm_mask[:, None] ) sample_values /= norm sample_mean = torch.mean(sample_values, dim=1) sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1) delta = sample_mean - mean mean += delta * batch_size / new_count corr = batch_size * count / new_count M2 += sample_m2 + delta**2 * corr count = new_count stddev = torch.sqrt(M2 / count) stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)} return stats
flexible
{ "blob_id": "b2944a95dbe25057155aaf6198a97d85b3bb620b", "index": 6436, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)\n }\n return stats\n", "step-3": "<mask token>\n__all__ = ['calculate_stats']\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. 
[h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)\n }\n return stats\n", "step-4": "from typing import Dict, Tuple\nimport torch\nfrom tqdm import tqdm\nimport schnetpack.properties as structure\nfrom schnetpack.data import AtomsLoader\n__all__ = ['calculate_stats']\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. 
[h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)\n }\n return stats\n", "step-5": "from typing import Dict, Tuple\n\nimport torch\nfrom tqdm import tqdm\n\nimport schnetpack.properties as structure\nfrom schnetpack.data import AtomsLoader\n\n__all__ = [\"calculate_stats\"]\n\n\ndef calculate_stats(\n dataloader: AtomsLoader,\n divide_by_atoms: Dict[str, bool],\n atomref: Dict[str, torch.Tensor] = None,\n) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. 
[h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor(\n [float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64\n )\n\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (\n 1 - norm_mask[:, None]\n )\n sample_values /= norm\n\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1)\n\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta**2 * corr\n count = new_count\n\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)}\n return stats\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
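The docstring in the row above cites Welford's incremental algorithm for a running mean and variance. As a reference point, here is the bare recurrence on plain Python floats; this sketch is independent of the SchNetPack batching and atomref handling in the row.

def welford(values):
    # Single pass: update the running mean, accumulate squared deviations (M2).
    count, mean, m2 = 0, 0.0, 0.0
    for x in values:
        count += 1
        delta = x - mean
        mean += delta / count
        m2 += delta * (x - mean)
    std = (m2 / count) ** 0.5 if count else 0.0  # population std, matching M2 / count
    return mean, std

print(welford([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]))  # -> (5.0, 2.0)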
# The variables below are being assigned an anonymous function
contador_letras = lambda lista: [len(x) for x in lista]

lista_animais = ['cachorro', 'pato', 'marreco']
print(contador_letras(lista_animais))
normal
{ "blob_id": "d13957c3d3f4d34279dc660d80ca91ca84ba4a77", "index": 4504, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(contador_letras(lista_animais))\n", "step-3": "contador_letras = lambda lista: [len(x) for x in lista]\nlista_animais = ['cachorro', 'pato', 'marreco']\nprint(contador_letras(lista_animais))\n", "step-4": "# As variáveis abaixo estão recebendo uma função anônima\ncontador_letras = lambda lista: [len(x) for x in lista]\n\nlista_animais = ['cachorro', 'pato', 'marreco']\nprint(contador_letras(lista_animais))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def home(request): cursos = Curso.objects.order_by('numero') return render_to_response('home.html', {'posts': posts}) <|reserved_special_token_1|> from django.shortcuts import render from django.shortcuts import render_to_response from post.models import Post def home(request): cursos = Curso.objects.order_by('numero') return render_to_response('home.html', {'posts': posts}) <|reserved_special_token_1|> from django.shortcuts import render # Create your views here. from django.shortcuts import render_to_response from post.models import Post #def ver_un_post(request, idpost): # post = Post.objects.get(id=idpost) # # return render_to_response("post.html",{"post":post,},) def home(request): cursos = Curso.objects.order_by("numero") return render_to_response("home.html",{"posts":posts},)
flexible
{ "blob_id": "bd81f4431699b1750c69b0bbc82f066332349fbd", "index": 8976, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef home(request):\n cursos = Curso.objects.order_by('numero')\n return render_to_response('home.html', {'posts': posts})\n", "step-3": "from django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom post.models import Post\n\n\ndef home(request):\n cursos = Curso.objects.order_by('numero')\n return render_to_response('home.html', {'posts': posts})\n", "step-4": "from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.shortcuts import render_to_response\n\nfrom post.models import Post\n\n#def ver_un_post(request, idpost):\n# post = Post.objects.get(id=idpost)\n# \n# return render_to_response(\"post.html\",{\"post\":post,},)\n\ndef home(request):\n cursos = Curso.objects.order_by(\"numero\")\n \n return render_to_response(\"home.html\",{\"posts\":posts},)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def multiply(num1, num2):
    return num1 * num2
normal
{ "blob_id": "e835e75f444e97ca948ce27504cc9149ea0092f6", "index": 1946, "step-1": "<mask token>\n", "step-2": "def multiply(num1, num2):\n return num1 * num2\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures' <|reserved_special_token_1|> <|reserved_special_token_0|> HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True) WORDLISTS_USER_DIR.mkdir(exist_ok=True) LOGS_DIR.mkdir(exist_ok=True) DATABASE_DIR.mkdir(exist_ok=True) HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True) class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures' <|reserved_special_token_1|> <|reserved_special_token_0|> HASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server' ROOT_PRIVATE_DIR = Path(__file__).parent.parent WORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists' WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists' RULES_DIR = ROOT_PRIVATE_DIR / 'rules' MASKS_DIR = ROOT_PRIVATE_DIR / 'masks' LOGS_DIR = ROOT_PRIVATE_DIR / 'logs' DATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database' ESSID_TRIED = DATABASE_DIR / 'essid_tried' DATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db' HASHCAT_STATUS_TIMER = 20 BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv' HASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' / 'hashcat_brain_password') HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True) WORDLISTS_USER_DIR.mkdir(exist_ok=True) LOGS_DIR.mkdir(exist_ok=True) DATABASE_DIR.mkdir(exist_ok=True) HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True) class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures' <|reserved_special_token_1|> import secrets from pathlib import Path HASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server' ROOT_PRIVATE_DIR = Path(__file__).parent.parent WORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists' WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists' RULES_DIR = ROOT_PRIVATE_DIR / 'rules' MASKS_DIR = ROOT_PRIVATE_DIR / 'masks' LOGS_DIR = ROOT_PRIVATE_DIR / 'logs' DATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database' ESSID_TRIED = DATABASE_DIR / 'essid_tried' DATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db' HASHCAT_STATUS_TIMER = 20 BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv' HASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' / 'hashcat_brain_password') HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True) WORDLISTS_USER_DIR.mkdir(exist_ok=True) LOGS_DIR.mkdir(exist_ok=True) DATABASE_DIR.mkdir(exist_ok=True) HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True) class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures' <|reserved_special_token_1|> import secrets from pathlib import Path HASHCAT_WPA_CACHE_DIR = Path.home() / ".hashcat" / "wpa-server" ROOT_PRIVATE_DIR = Path(__file__).parent.parent WORDLISTS_DIR = ROOT_PRIVATE_DIR / "wordlists" WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / "wordlists" # user custom wordlists RULES_DIR = ROOT_PRIVATE_DIR / "rules" MASKS_DIR = ROOT_PRIVATE_DIR / "masks" LOGS_DIR = ROOT_PRIVATE_DIR / "logs" DATABASE_DIR = 
HASHCAT_WPA_CACHE_DIR / "database" ESSID_TRIED = DATABASE_DIR / "essid_tried" DATABASE_PATH = DATABASE_DIR / "hashcat_wpa.db" # Hashcat HASHCAT_STATUS_TIMER = 20 # seconds BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / "benchmark.csv" HASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / "brain" / "hashcat_brain_password" # mkdirs HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True) WORDLISTS_USER_DIR.mkdir(exist_ok=True) LOGS_DIR.mkdir(exist_ok=True) DATABASE_DIR.mkdir(exist_ok=True) HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True) class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) # Flask-SQLAlchemy settings SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False # Airodump capture files CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / "captures"
flexible
{ "blob_id": "20d480517226cb7fbced765554a02fa5cbc29033", "index": 6491, "step-1": "<mask token>\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-2": "<mask token>\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-3": "<mask token>\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-4": "import secrets\nfrom pathlib import Path\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-5": "import secrets\nfrom pathlib import Path\n\nHASHCAT_WPA_CACHE_DIR = Path.home() / \".hashcat\" / \"wpa-server\"\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\n\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / \"wordlists\"\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / \"wordlists\" # user custom wordlists\nRULES_DIR = 
ROOT_PRIVATE_DIR / \"rules\"\nMASKS_DIR = ROOT_PRIVATE_DIR / \"masks\"\nLOGS_DIR = ROOT_PRIVATE_DIR / \"logs\"\n\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / \"database\"\nESSID_TRIED = DATABASE_DIR / \"essid_tried\"\nDATABASE_PATH = DATABASE_DIR / \"hashcat_wpa.db\"\n\n# Hashcat\nHASHCAT_STATUS_TIMER = 20 # seconds\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / \"benchmark.csv\"\nHASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / \"brain\" / \"hashcat_brain_password\"\n\n# mkdirs\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n\n SECRET_KEY = secrets.token_bytes(64)\n\n # Flask-SQLAlchemy settings\n SQLALCHEMY_DATABASE_URI = \"sqlite:///{}\".format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n # Airodump capture files\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / \"captures\"\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
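The config row above combines two small idioms: idempotent creation of a nested cache directory tree and deriving a SQLite connection string from a pathlib.Path. A compact sketch of both, using made-up directory names rather than the hashcat-wpa ones:

import secrets
from pathlib import Path

cache_dir = Path.home() / ".demo-app" / "cache"  # assumed location, not from the row
db_path = cache_dir / "database" / "app.db"

# parents=True creates intermediate directories; exist_ok=True makes reruns a no-op.
db_path.parent.mkdir(parents=True, exist_ok=True)

class Config:
    SECRET_KEY = secrets.token_bytes(64)  # new random key at each process start
    SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(db_path)

print(Config.SQLALCHEMY_DATABASE_URI)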
from random import choice, random, randrange from math import fsum import os import numpy as np def mat17(N, ATOM_TYPES, ndenmax=0.04302, ndenmin=0.0000013905, xmax=51.2, xmin=25.6, ymax=51.2, ymin=25.6, zmax=51.2, zmin=25.6, epmax=513.264, epmin=1.2580, sigmax=6.549291, sigmin=1.052342, qmax=0.0, qmin=0.0): #epmax DEFINED WRT TO X-Y-Z LIMITS? #max number density based on that of pure Iron #max unit cell dimensions based on PCN-777 cages size #max LJ parameters (for now using 1.5x highest values in GenericMOFs) #max charge... UFF? #ATOM_TYPES = 4 if type(N) != int: print 'N must be an integer.' Ntag = str(N) ntag = str(ndenmax) xtag = str(xmax) ytag = str(xmax) ztag = str(xmax) eptag = str(xmax) sigtag = str(xmax) qtag = str(xmax) top_path = ('materials' + '_' + Ntag + '.' + ntag + '_' + xtag + '.' + ytag + '.' + ztag + '_' + eptag + '.' + sigtag + '_' + qtag) if not os.path.exists(top_path): os.mkdir(top_path) # def drange(start, stop, step): # r = start # while r < stop: # yield r # r+= step # nden0 = drange(1, ndenmax*10000, ndenp*10000) # ndendim = [nden for nden in nden0] # x0 = drange(0, xmax + xp, xp) # xdim = [x for x in x0] # y0 = drange(0, ymax + yp, yp) # ydim = [y for y in y0] # z0 = drange(0, zmax + zp, zp) # zdim = [z for z in z0] # ep0 = drange(0, epmax + epp, epp) # epdim = [ep for ep in ep0] # sig0 = drange(0, sigmax + sigp, sigp) # sigdim = [sig for sig in sig0] #open mat_stats.txt, to track material data mat_stats = open(os.path.abspath(top_path)+ '/mat_stats.txt', 'w') mat_stat_heading = ('\nBOUNDARIES\nNumber of particles: ' + Ntag + '\nnumber density: ' + ntag + '\nx-coordinate: ' + xtag + '\ny-coordinate: ' + ytag + '\nz-coordinate: ' + ztag + '\nEpsilon: ' + eptag + '\nSigma: ' + sigtag + '\nCharge: ' + qtag + '\n\n' + '#name number density xdim ydim '+ 'zdim total particles net charge\n') mat_stats.write(mat_stat_heading) #MAT-XXX loop... for i in range(N + 1): mat_name = 'MAT-' + str(i) #make MAT-XXX directory os.mkdir(top_path+'/'+mat_name) #open .cif file cif_file = open(os.path.abspath(top_path) + '/'+mat_name + '/' + mat_name+'.cif', 'w') #open force_field_mixing_rules.def mixing_rules = open(os.path.abspath(top_path) + '/'+mat_name + '/force_field_mixing_rules.def', 'w') #open pseudo_atoms.def pseudo_atoms = open(os.path.abspath(top_path) + '/'+mat_name + '/pseudo_atoms.def', 'w') #open force_field.def force_field = open(os.path.abspath(top_path) + '/'+mat_name + '/force_field.def', 'w') #nden_ = choice(ndendim)/10000. 
#xdim_ = choice(xdim) #ydim_ = choice(ydim) #zdim_ = choice(zdim) #nden_ = randrange(0.0001, ndenmax, 1) #xdim_ = randrange(15., xmax, 0.1) #ydim_ = randrange(15., ymax, 0.1) #zdim_ = randrange(15., zmax, 0.1) #N_ = xdim_ * ydim_ * zdim_ * nden_ #n_ = int(N_) nden_ = round(random() * (ndenmax - ndenmin) + ndenmin, 6) xdim_ = round(random() * (xmax - xmin) + xmin, 4) ydim_ = round(random() * (ymax - ymin) + ymin, 4) zdim_ = round(random() * (zmax - zmin) + zmin, 4) N_ = xdim_ * ydim_ * zdim_ * nden_ n_ = int(N_) cif_heading = ('material' + str(i) + '\n\nloop_\n' + '_symmetry_equiv_pos_as_xyz\n' + ' x,y,z\n' + '_cell_length_a ' + str(xdim_) + '\n_cell_length_b ' + str(ydim_) + '\n_cell_length_c ' + str(zdim_) + '\n_cell_angle_alpha 90.0000\n' + '_cell_angle_beta 90.0000\n' + '_cell_angle_gamma 90.0000\n' + 'loop_\n' + '_atom_site_label\n' + '_atom_site_type_symbol\n' + '_atom_site_fract_x\n' + '_atom_site_fract_y\n' + '_atom_site_fract_z\n' + '_atom_site_charge\n') cif_file.write(cif_heading) # mixing_heading = ('# general rule for shifted vs truncated\nshifted\n' + # '# general rule for tailcorrections\nno\n' + # '# number of defined interactions\n' + str(108) + #check these + XXX values # '\n# type interaction\n') mixing_heading = ('# general rule for shifted vs truncated\n' + 'shifted\n' + '# general rule tailcorrections\n' + 'no\n' + '# number of defined interactions\n' + str(ATOM_TYPES + 8) + '\n' + '# type interaction, parameters. IMPORTANT: define shortest matches first, so that more specific ones overwrites these\n') mixing_rules.write(mixing_heading) pseudo_heading = ('#number of pseudo atoms\n' + str(ATOM_TYPES + 8) + '\n#type print as chem oxidation' + ' mass charge polarization ' + 'B-factor radii connectivity anisotropic' + ' anisotrop-type tinker-type\n') pseudo_atoms.write(pseudo_heading) ##make charges #q = [] #for k in range(n_ + 1): # q.append(0) #for l in range(5*(n_ + 1)): # m = choice(range(n_ + 1)) # n = choice(range(n_ + 1)) # if m == n: # n = choice(range(n_ + 1)) # dq = random() * qmax # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax: # q[m] = float(float(q[m]) + dq) # q[n] = float(float(q[n]) - dq) # if q[m] > qmax or q[n] < -1 * qmax: # q[m] = q[m] - dq # q[n] = q[n] + dq #for o in range(5*(n_ + 1)): # m = choice(range(n_ + 1)) # n = choice(range(n_ + 1)) # if m == n: # n = choice(range(n_ + 1)) # dq = random() * qmax # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax: # q[m] = float(float(q[m]) + dq) # q[n] = float(float(q[n]) - dq) # if q[m] > qmax or q[n] < -1 * qmax: # q[m] = q[m] - dq # q[n] = q[n] + dq #p = choice(range(n_ + 1)) #q[p] = q[p] - sum(q) #if sum(q) != 0.000000000000000000000: # for l in range(5*(n_ + 1)): # m = choice(range(n_ + 1)) # n = choice(range(n_ + 1)) # if m == n: # n = choice(range(n_ + 1)) # dq = random() * qmax # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax: # q[m] = float(float(q[m]) + dq) # q[n] = float(float(q[n]) - dq) # if q[m] > qmax or q[n] < -1 * qmax: # q[m] = q[m] - dq # q[n] = q[n] + dq # for o in range(5*(n_ + 1)): # m = choice(range(n_ + 1)) # n = choice(range(n_ + 1)) # if m == n: # n = choice(range(n_ + 1)) # dq = random() * qmax # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax: # q[m] = float(float(q[m]) + dq) # q[n] = float(float(q[n]) - dq) # if q[m] > qmax or q[n] < -1 * qmax: # q[m] = q[m] - dq # q[n] = q[n] + dq # p = choice(range(n_ + 1)) # q[p] = q[p] - sum(q) #LJ parameters ep = [] sig = [] q = [] for i in range(ATOM_TYPES): epsilon = round(random() * (epmax - epmin) + epmin, 4) ep.append(epsilon) sigma = 
round(random() * (sigmax -sigmin) + sigmin, 4) sig.append(sigma) charge = 0 q.append(charge) ep_ = np.asarray(ep) sig_ = np.asarray(sig) q_ = np.asarray(q) ID_ = np.asarray(range(0,ATOM_TYPES)) ep = ep_.reshape(-1,1) sig = sig_.reshape(-1,1) q = q_.reshape(-1,1) ID = ID_.reshape(-1,1) atoms = np.hstack((ID, ep, sig, q)) n_atoms = np.empty([0, 4]) for i in range(n_): atomtype = choice(range(ATOM_TYPES)) n_atoms = np.vstack([n_atoms, atoms[atomtype, :]]) IDs = n_atoms[:,0] for i in range(ATOM_TYPES): if i in IDs: charge = round(random() * (qmax - qmin) + qmin, 4) weight_i = list(IDs).count(i) k = choice(IDs) weight_k = list(IDs).count(k) for j in range(n_): if n_atoms[j,0] == i: n_atoms[j,3] = n_atoms[j,3] + charge * int(weight_k) atoms[i,3] = n_atoms[j,3] + charge * int(weight_k) if n_atoms[j,0] == k: n_atoms[j,3] = n_atoms[j,3] - charge * int(weight_i) atoms[k,3] = n_atoms[j,3] - charge * int(weight_i) # for i in range(100): # atoms[i,3] = round(atoms[i,3], 4) # for i in range(n_): # n_atoms[i,3] = round(n_atoms[i,3], 4) # net_charge = sum(n_atoms[:,3]) # if net_charge != 0: # atomID = choice(range(100)) # weight = list(IDs).count(atomID) # atoms[atomID,3] = atoms[atomID,3] - net_charge/weight # for i in range(n_): # if n_atoms[i,0] == atomID: # n_atoms[atomID,3] = n_atoms[atomID,3] - net_charge/weight mat_charge = str(sum(n_atoms[:,3])) cif_file.write('#NET CHARGE: ' + mat_charge + '\n') mat_X_stats = (mat_name + ' ' + str(nden_) + ' ' + str(xdim_) + ' ' + str(ydim_) + ' ' + str(zdim_) + ' ' + str(n_) + ' ' + mat_charge + '\n') mat_stats.write(mat_X_stats) eps = n_atoms[:,1] sigs = n_atoms[:,2] qs = n_atoms[:,3] #writing mixing_rules, pseudo_atoms... for i in range(ATOM_TYPES): atom_X_pseudo = ('A_' + str(int(atoms[i,0])) + ' yes C C 0 ' + '12.0 ' + str(atoms[i,3]) + ' 0.0 0.0 ' + '1.0 1.00 0 0 absolute 0\n') pseudo_atoms.write(atom_X_pseudo) atom_X_mixing = ('A_' + str(int(atoms[i,0])) + ' ' + 'lennard-jones ' + str(atoms[i,1]) + ' ' + str(atoms[i,2]) + '\n') mixing_rules.write(atom_X_mixing) #writing cif... for i in range(n_): #FIX THIS TO ALLOW FOR NON-INT VALUES? 
x = choice(range(int(xdim_ + 1))) y = choice(range(int(ydim_ + 1))) z = choice(range(int(zdim_ + 1))) atom_X_cif = ('A_' + str(int(n_atoms[i,0])) + ' ' + 'C ' + str(round(x/xdim_, 4)) + ' ' + str(round(y/ydim_, 4)) + ' ' + str(round(z/zdim_, 4)) + ' ' + str(n_atoms[i,3]) + '\n') cif_file.write(atom_X_cif) # #ep = choice(epdim) # #sig = choice(sigdim) # epval = ep[atomtype] # sigval = sig[atomtype] # charge = q[n_] # #if charge < 0: # atom_X_cif = ('A' + str(atomtype) + ' ' + 'C ' + # str(x/xdim_) + ' ' + str(y/ydim_) + # ' ' + str(z/zdim_) + ' ' + # str(charge) + '\n') # cif_file.write(atom_X_cif) # for k in range(100): # if k != atomtype: # atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 0' + # ' 0.0 0.0 1.0 1.00 0 ' + # '0 absolute 0\n') # if k == atomtype: # atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 ' + # str(q[n_]) + ' 0.0 0.0 1.0 1.00 0 ' + # '0 absolute 0\n') # # pseudo_atoms.write(atom_X_pseudo) # # atom_X_mixing = ('A' + str(k) + ' LENNARD_JONES ' + # str(ep[k]) + ' ' + str(sig[k]) + '\n') # mixing_rules.write(atom_X_mixing) #if charge >= 0: # atom_X_cif = ('A' + str(atomtype) + ' ' + str(x) + ' ' + # str(y) + ' ' + str(z) + ' ' + # str(charge) + '\n') #cif_file.write(atom_X_cif) #for i in range(100): # atom_X_mixing = ('A' + str(i) + ' LENNARD_JONES ' + # str(ep[i]) + ' ' + str(sig[i]) + '\n') # mixing_rules.write(atom_X_mixing) # # atom_X_pseudo = ('A' + str(i) + ' yes C C 0 12.0 ' + # str(q[i]) + ' 0.0 0.0 1.0 1.00 0 ' + # '0 absolute 0\n') ## pseudo_atoms.write(atom_X_pseudo) #SUPPORTED ADSORBATES # name pseudo-atoms # N2 : N_n2; N_com # CO2 : C_co2; O_co2 # methane : CH4_sp3 # helium : He # hydrogen : H_h2; H_com # H2 : H_h2; H_com #adsorbate_mixing = ('N_n2 LENNARD_JONES 36.0 3.31\n' + # 'N_com none\n' + # 'C_co2 LENNARD_JONES 27.0 2.80\n' + # 'O_co2 LENNARD_JONES 79.0 3.05\n' + # 'CH4_sp3 LENNARD_JONES 158.5 3.72\n' + # 'He LENNARD_JONES 10.9 2.64\n' + # 'H_h2 none\n' + # 'H_com LENNARD_JONES 36.7 2.958\n' + # '# general mixing rule for Lennard-Jones\n' + # 'Lorentz-Berthlot') adsorbate_mixing = ('N_n2 lennard-jones 36.0 3.31\n' + 'N_com none\n' + 'C_co2 lennard-jones 27.0 2.80\n' + 'O_co2 lennard-jones 79.0 3.05\n' + 'CH4_sp3 lennard-jones 158.5 3.72\n' + 'He lennard-jones 10.9 2.64\n' + 'H_h2 none\n' + 'H_com lennard-jones 36.7 2.958\n' + '# general mixing rule for Lennard-Jones\n' + 'Lorentz-Berthelot') mixing_rules.write(adsorbate_mixing) adsorbate_pseudo = ('N_n2 yes N N 0 14.00674 -0.4048' + ' 0.0 1.0 0.7 0 0 relative 0\n' + 'N_com no N - 0 0.0 0.8096' + ' 0.0 1.0 0.7 0 0 relative 0\n' + 'C_co2 yes C C 0 12.0 0.70' + ' 0.0 1.0 0.720 0 0 relative 0\n' + 'O_co2 yes O O 0 15.9994 -0.35' + ' 0.0 1.0 0.68 0 0 relative 0\n' + 'CH4_sp3 yes C C 0 16.04246 0.0' + ' 0.0 1.0 1.00 0 0 relative 0\n' + 'He yes He He 0 4.002602 0.0' + ' 0.0 1.0 1.0 0 0 relative 0\n' + 'H_h2 yes H H 0 1.00794 0.468' + ' 0.0 1.0 0.7 0 0 relative 0\n' + 'H_com no H H 0 0.0 - 0.936' + ' 0.0 1.0 0.7 0 0 relative 0\n') pseudo_atoms.write(adsorbate_pseudo) force_field_rules = ('# rules to overwrite\n0\n' + '# number of defined interactions\n0\n' + '# mixing rules to overwrite\n0') force_field.write(force_field_rules) cif_file.close() mixing_rules.close() pseudo_atoms.close() force_field.close() mat_stats.close()
normal
{ "blob_id": "ba72af921a9562d748bcd65f1837ea8eb5da5697", "index": 150, "step-1": "from random import choice, random, randrange\nfrom math import fsum\nimport os\nimport numpy as np\n\ndef mat17(N, ATOM_TYPES, ndenmax=0.04302, ndenmin=0.0000013905, xmax=51.2, xmin=25.6, ymax=51.2, ymin=25.6,\nzmax=51.2, zmin=25.6, epmax=513.264, epmin=1.2580, sigmax=6.549291, sigmin=1.052342, qmax=0.0, qmin=0.0):\n#epmax DEFINED WRT TO X-Y-Z LIMITS?\n#max number density based on that of pure Iron\n#max unit cell dimensions based on PCN-777 cages size\n#max LJ parameters (for now using 1.5x highest values in GenericMOFs)\n#max charge... UFF?\n\n #ATOM_TYPES = 4\n\n if type(N) != int:\n print 'N must be an integer.'\n \n Ntag = str(N)\n ntag = str(ndenmax)\n xtag = str(xmax)\n ytag = str(xmax)\n ztag = str(xmax)\n eptag = str(xmax)\n sigtag = str(xmax)\n qtag = str(xmax)\n \n top_path = ('materials' + '_' + Ntag + '.' + ntag + '_' + xtag + '.' + ytag\n\t\t + '.' + ztag + '_' + eptag + '.' + sigtag + '_' + qtag)\n \n if not os.path.exists(top_path):\n os.mkdir(top_path) \n\n# def drange(start, stop, step):\n# r = start\n# while r < stop:\n# yield r\n# r+= step\n \n# nden0 = drange(1, ndenmax*10000, ndenp*10000)\n# ndendim = [nden for nden in nden0]\n \n# x0 = drange(0, xmax + xp, xp)\n# xdim = [x for x in x0]\n \n# y0 = drange(0, ymax + yp, yp)\n# ydim = [y for y in y0]\n \n# z0 = drange(0, zmax + zp, zp)\n# zdim = [z for z in z0]\n \n# ep0 = drange(0, epmax + epp, epp)\n# epdim = [ep for ep in ep0]\n# sig0 = drange(0, sigmax + sigp, sigp)\n# sigdim = [sig for sig in sig0] \n\n\n #open mat_stats.txt, to track material data \n mat_stats = open(os.path.abspath(top_path)+ '/mat_stats.txt', 'w')\n mat_stat_heading = ('\\nBOUNDARIES\\nNumber of particles: ' + Ntag +\n \t'\\nnumber density: ' + ntag + '\\nx-coordinate: ' +\n\t\t\txtag + '\\ny-coordinate: ' + ytag + '\\nz-coordinate: ' +\n\t\t\t ztag + '\\nEpsilon: ' + eptag + '\\nSigma: ' + sigtag \n\t\t\t+ '\\nCharge: ' + qtag + '\\n\\n' +\n\t\t\t'#name number density xdim ydim '+\n\t\t\t'zdim total particles net charge\\n')\n mat_stats.write(mat_stat_heading)\n \n #MAT-XXX loop...\n for i in range(N + 1):\n \n mat_name = 'MAT-' + str(i)\n\n \n\t#make MAT-XXX directory\n\tos.mkdir(top_path+'/'+mat_name)\n\t\n\t#open .cif file\n cif_file = open(os.path.abspath(top_path) + '/'+mat_name + '/' + \n\t\t\tmat_name+'.cif', 'w')\n\t\n\t#open force_field_mixing_rules.def\n\tmixing_rules = open(os.path.abspath(top_path) + '/'+mat_name +\n\t\t\t'/force_field_mixing_rules.def', 'w')\n \n\t#open pseudo_atoms.def\n pseudo_atoms = open(os.path.abspath(top_path) + '/'+mat_name + \n\t\t\t'/pseudo_atoms.def', 'w')\n\t\n\t#open force_field.def\n force_field = open(os.path.abspath(top_path) + '/'+mat_name +\n\t\t\t'/force_field.def', 'w')\n\n \t#nden_ = choice(ndendim)/10000.\n #xdim_ = choice(xdim)\n #ydim_ = choice(ydim)\n #zdim_ = choice(zdim)\n #nden_ = randrange(0.0001, ndenmax, 1)\n #xdim_ = randrange(15., xmax, 0.1)\n\t#ydim_ = randrange(15., ymax, 0.1)\n #zdim_ = randrange(15., zmax, 0.1)\n #N_ = xdim_ * ydim_ * zdim_ * nden_\n #n_ = int(N_) \n nden_ = round(random() * (ndenmax - ndenmin) + ndenmin, 6)\n\txdim_ = round(random() * (xmax - xmin) + xmin, 4)\n ydim_ = round(random() * (ymax - ymin) + ymin, 4)\n zdim_ = round(random() * (zmax - zmin) + zmin, 4)\n N_ = xdim_ * ydim_ * zdim_ * nden_\n n_ = int(N_)\n\n cif_heading = ('material' + str(i) + \n\t\t\t'\\n\\nloop_\\n' +\n\t\t\t'_symmetry_equiv_pos_as_xyz\\n' +\n\t\t\t' x,y,z\\n' +\n\t\t\t'_cell_length_a ' + str(xdim_) 
+\n\t\t\t'\\n_cell_length_b ' + str(ydim_) +\n\t\t\t'\\n_cell_length_c ' + str(zdim_) + \n\t\t\t'\\n_cell_angle_alpha 90.0000\\n' +\n\t\t\t'_cell_angle_beta 90.0000\\n' +\n\t\t\t'_cell_angle_gamma 90.0000\\n' +\n\t\t\t'loop_\\n' +\n\t\t\t'_atom_site_label\\n' +\n\t\t\t'_atom_site_type_symbol\\n' +\n\t\t\t'_atom_site_fract_x\\n' +\n\t\t\t'_atom_site_fract_y\\n' +\n\t\t\t'_atom_site_fract_z\\n' +\n\t\t\t'_atom_site_charge\\n')\n\tcif_file.write(cif_heading)\n\n# mixing_heading = ('# general rule for shifted vs truncated\\nshifted\\n' +\n#\t\t\t'# general rule for tailcorrections\\nno\\n' +\n#\t\t\t'# number of defined interactions\\n' + str(108) + #check these + XXX values\n#\t\t\t'\\n# type interaction\\n')\n\n mixing_heading = ('# general rule for shifted vs truncated\\n' +\n 'shifted\\n' +\n '# general rule tailcorrections\\n' +\n 'no\\n' +\n '# number of defined interactions\\n' +\n str(ATOM_TYPES + 8) + '\\n' +\n '# type interaction, parameters. IMPORTANT: define shortest matches first, so that more specific ones overwrites these\\n')\n mixing_rules.write(mixing_heading)\n \n pseudo_heading = ('#number of pseudo atoms\\n' + str(ATOM_TYPES + 8) + \n\t\t\t'\\n#type print as chem oxidation' +\n\t\t\t' mass charge polarization ' +\n\t\t\t'B-factor radii connectivity anisotropic' +\n\t\t\t' anisotrop-type tinker-type\\n')\n pseudo_atoms.write(pseudo_heading)\n \n ##make charges\n #q = []\n \t#for k in range(n_ + 1):\n # q.append(0)\n #for l in range(5*(n_ + 1)):\n # m = choice(range(n_ + 1))\n # n = choice(range(n_ + 1))\n # if m == n:\n # n = choice(range(n_ + 1))\n # dq = random() * qmax\n # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:\n # q[m] = float(float(q[m]) + dq)\n # q[n] = float(float(q[n]) - dq)\n # if q[m] > qmax or q[n] < -1 * qmax:\n # q[m] = q[m] - dq\n # q[n] = q[n] + dq\n #for o in range(5*(n_ + 1)):\n # m = choice(range(n_ + 1))\n # n = choice(range(n_ + 1))\n # if m == n:\n # n = choice(range(n_ + 1))\n # dq = random() * qmax\n # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:\n # q[m] = float(float(q[m]) + dq)\n # q[n] = float(float(q[n]) - dq)\n # if q[m] > qmax or q[n] < -1 * qmax:\n # q[m] = q[m] - dq\n # q[n] = q[n] + dq\n #p = choice(range(n_ + 1))\n #q[p] = q[p] - sum(q)\n #if sum(q) != 0.000000000000000000000:\n # for l in range(5*(n_ + 1)):\n # m = choice(range(n_ + 1))\n # n = choice(range(n_ + 1))\n # if m == n:\n # n = choice(range(n_ + 1))\n # dq = random() * qmax\n # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:\n # q[m] = float(float(q[m]) + dq)\n # q[n] = float(float(q[n]) - dq)\n # if q[m] > qmax or q[n] < -1 * qmax:\n # q[m] = q[m] - dq\n # q[n] = q[n] + dq\n # for o in range(5*(n_ + 1)):\n # m = choice(range(n_ + 1))\n # n = choice(range(n_ + 1))\n # if m == n:\n # n = choice(range(n_ + 1))\n # dq = random() * qmax\n # if q[m] + dq <= qmax and q[n] - dq >= -1 * qmax:\n # q[m] = float(float(q[m]) + dq)\n # q[n] = float(float(q[n]) - dq)\n # if q[m] > qmax or q[n] < -1 * qmax:\n # q[m] = q[m] - dq\n # q[n] = q[n] + dq\n # p = choice(range(n_ + 1))\n # q[p] = q[p] - sum(q)\n\t\n #LJ parameters\n\n\tep = []\n\tsig = []\n q = []\n for i in range(ATOM_TYPES):\n epsilon = round(random() * (epmax - epmin) + epmin, 4)\n ep.append(epsilon)\n sigma = round(random() * (sigmax -sigmin) + sigmin, 4)\n sig.append(sigma)\n charge = 0\n q.append(charge)\n \n ep_ = np.asarray(ep)\n sig_ = np.asarray(sig)\n q_ = np.asarray(q)\n ID_ = np.asarray(range(0,ATOM_TYPES))\n\n ep = ep_.reshape(-1,1)\n sig = sig_.reshape(-1,1)\n q = q_.reshape(-1,1)\n ID = 
ID_.reshape(-1,1)\n\n atoms = np.hstack((ID, ep, sig, q))\n\n n_atoms = np.empty([0, 4])\n for i in range(n_):\n atomtype = choice(range(ATOM_TYPES))\n n_atoms = np.vstack([n_atoms, atoms[atomtype, :]])\n \n IDs = n_atoms[:,0]\n\n for i in range(ATOM_TYPES):\n if i in IDs:\n charge = round(random() * (qmax - qmin) + qmin, 4)\n weight_i = list(IDs).count(i)\n k = choice(IDs)\n weight_k = list(IDs).count(k)\n for j in range(n_):\n if n_atoms[j,0] == i:\n n_atoms[j,3] = n_atoms[j,3] + charge * int(weight_k)\n\t\t\tatoms[i,3] = n_atoms[j,3] + charge * int(weight_k)\n if n_atoms[j,0] == k:\n n_atoms[j,3] = n_atoms[j,3] - charge * int(weight_i)\n atoms[k,3] = n_atoms[j,3] - charge * int(weight_i)\n\n# for i in range(100):\n# atoms[i,3] = round(atoms[i,3], 4)\n\n# for i in range(n_):\n# n_atoms[i,3] = round(n_atoms[i,3], 4)\n\n\n\n# net_charge = sum(n_atoms[:,3])\n# if net_charge != 0:\n# atomID = choice(range(100))\n# weight = list(IDs).count(atomID)\n# atoms[atomID,3] = atoms[atomID,3] - net_charge/weight\n# for i in range(n_):\n# if n_atoms[i,0] == atomID:\n# n_atoms[atomID,3] = n_atoms[atomID,3] - net_charge/weight\n\n\n mat_charge = str(sum(n_atoms[:,3]))\n\tcif_file.write('#NET CHARGE: ' + mat_charge + '\\n')\n\tmat_X_stats = (mat_name + ' ' + str(nden_) + ' ' + str(xdim_) + ' ' + str(ydim_) +\n\t\t\t' ' + str(zdim_) + ' ' + str(n_) + ' ' + \n\t\t\tmat_charge + '\\n')\n\tmat_stats.write(mat_X_stats)\n\t\n eps = n_atoms[:,1]\n sigs = n_atoms[:,2]\n qs = n_atoms[:,3]\n\t\n\t#writing mixing_rules, pseudo_atoms...\n for i in range(ATOM_TYPES):\n\n atom_X_pseudo = ('A_' + str(int(atoms[i,0])) + ' yes C C 0 ' +\n '12.0 ' + str(atoms[i,3]) + ' 0.0 0.0 ' +\n '1.0 1.00 0 0 absolute 0\\n')\n pseudo_atoms.write(atom_X_pseudo)\n\n atom_X_mixing = ('A_' + str(int(atoms[i,0])) + ' ' +\n 'lennard-jones ' + str(atoms[i,1]) + ' '\n + str(atoms[i,2]) + '\\n')\n mixing_rules.write(atom_X_mixing) \n\n\t#writing cif...\n\n for i in range(n_):\n#FIX THIS TO ALLOW FOR NON-INT VALUES?\n x = choice(range(int(xdim_ + 1)))\n y = choice(range(int(ydim_ + 1)))\n z = choice(range(int(zdim_ + 1)))\n \n atom_X_cif = ('A_' + str(int(n_atoms[i,0])) + ' ' + 'C ' + \n str(round(x/xdim_, 4)) + ' ' + str(round(y/ydim_, 4)) + \n ' ' + str(round(z/zdim_, 4)) + ' ' +\n str(n_atoms[i,3]) + '\\n') \n cif_file.write(atom_X_cif)\n\n\n\n # #ep = choice(epdim)\n # #sig = choice(sigdim)\n # epval = ep[atomtype]\n # sigval = sig[atomtype]\n # charge = q[n_]\n # #if charge < 0:\n # atom_X_cif = ('A' + str(atomtype) + ' ' + 'C ' + \n\t#\t\t\tstr(x/xdim_) + ' ' + str(y/ydim_) + \n\t#\t\t\t' ' + str(z/zdim_) + ' ' +\n\t#\t\t\tstr(charge) + '\\n') \n # cif_file.write(atom_X_cif)\n # for k in range(100):\n # if k != atomtype:\n # atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 0' +\n\t#\t\t\t ' 0.0 0.0 1.0 1.00 0 ' +\n\t#\t\t\t '0 absolute 0\\n')\n # if k == atomtype:\n # atom_X_pseudo = ('A' + str(k) + ' yes C C 0 12.0 ' +\n\t#\t\t\t str(q[n_]) + ' 0.0 0.0 1.0 1.00 0 ' +\n\t#\t\t\t '0 absolute 0\\n')\n # \n # pseudo_atoms.write(atom_X_pseudo)\n # \n # atom_X_mixing = ('A' + str(k) + ' LENNARD_JONES ' +\n\t#\t\t\t str(ep[k]) + ' ' + str(sig[k]) + '\\n')\n # mixing_rules.write(atom_X_mixing)\n\n \n\n #if charge >= 0:\n # atom_X_cif = ('A' + str(atomtype) + ' ' + str(x) + ' ' +\n\t\t#\t\tstr(y) + ' ' + str(z) + ' ' +\n\t\t#\t\tstr(charge) + '\\n')\n\t\t#cif_file.write(atom_X_cif)\n \t\n #for i in range(100):\n\n # atom_X_mixing = ('A' + str(i) + ' LENNARD_JONES ' +\n\t#\t\t\tstr(ep[i]) + ' ' + str(sig[i]) + '\\n')\n # 
mixing_rules.write(atom_X_mixing)\n#\n # atom_X_pseudo = ('A' + str(i) + ' yes C C 0 12.0 ' +\n#\t\t\t\tstr(q[i]) + ' 0.0 0.0 1.0 1.00 0 ' +\n#\t\t\t\t'0 absolute 0\\n')\n ## pseudo_atoms.write(atom_X_pseudo)\n \n#SUPPORTED ADSORBATES\n# name pseudo-atoms\n# N2 : N_n2; N_com\n# CO2 : C_co2; O_co2\n# methane : CH4_sp3\n# helium : He\n# hydrogen : H_h2; H_com\n# H2 : H_h2; H_com\n\n #adsorbate_mixing = ('N_n2 LENNARD_JONES 36.0 3.31\\n' +\n\t#\t\t'N_com none\\n' +\n\t#\t\t'C_co2 LENNARD_JONES 27.0 2.80\\n' +\n\t#\t\t'O_co2 LENNARD_JONES 79.0 3.05\\n' +\n\t#\t\t'CH4_sp3 LENNARD_JONES 158.5 3.72\\n' +\n\t#\t\t'He LENNARD_JONES 10.9 2.64\\n' +\n\t#\t\t'H_h2 none\\n' +\n\t#\t\t'H_com LENNARD_JONES 36.7 2.958\\n' +\n\t#\t\t'# general mixing rule for Lennard-Jones\\n' +\n\t#\t\t'Lorentz-Berthlot')\n adsorbate_mixing = ('N_n2 lennard-jones 36.0 3.31\\n' +\n 'N_com none\\n' +\n 'C_co2 lennard-jones 27.0 2.80\\n' +\n 'O_co2 lennard-jones 79.0 3.05\\n' +\n 'CH4_sp3 lennard-jones 158.5 3.72\\n' +\n 'He lennard-jones 10.9 2.64\\n' +\n 'H_h2 none\\n' +\n 'H_com lennard-jones 36.7 2.958\\n' +\n '# general mixing rule for Lennard-Jones\\n' +\n 'Lorentz-Berthelot')\n mixing_rules.write(adsorbate_mixing)\n\n adsorbate_pseudo = ('N_n2 yes N N 0 14.00674 -0.4048' +\n\t\t\t' 0.0 1.0 0.7 0 0 relative 0\\n' +\n\t\t\t'N_com no N - 0 0.0 0.8096' +\n\t\t\t' 0.0 1.0 0.7 0 0 relative 0\\n' +\n\t\t\t'C_co2 yes C C 0 12.0 0.70' +\n\t\t\t' 0.0 1.0 0.720 0 0 relative 0\\n' +\n\t\t\t'O_co2 yes O O 0 15.9994 -0.35' +\n\t\t\t' 0.0 1.0 0.68 0 0 relative 0\\n' +\n\t\t\t'CH4_sp3 yes C C 0 16.04246 0.0' +\n\t\t\t' 0.0 1.0 1.00 0 0 relative 0\\n' +\n\t\t\t'He yes He He 0 4.002602 0.0' +\n\t\t\t' 0.0 1.0 1.0 0 0 relative 0\\n' +\n\t\t\t'H_h2 yes H H 0 1.00794 0.468' +\n\t\t\t' 0.0 1.0 0.7 0 0 relative 0\\n' +\n\t\t\t'H_com no H H 0 0.0 - 0.936' +\n\t\t\t' 0.0 1.0 0.7 0 0 relative 0\\n')\n pseudo_atoms.write(adsorbate_pseudo)\n \n force_field_rules = ('# rules to overwrite\\n0\\n' +\n\t\t\t\t'# number of defined interactions\\n0\\n' +\n\t\t\t\t'# mixing rules to overwrite\\n0')\n\tforce_field.write(force_field_rules)\n \n cif_file.close()\n mixing_rules.close()\n pseudo_atoms.close()\n force_field.close()\n mat_stats.close()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> def decrypt(message): message = base64.urlsafe_b64decode(message) iv = message[:16] signed_data = message[16:36] encrypted_data = message[36:] cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}") print(f'signed {len(signed_data)} {signed_data}') print( f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}" ) decryptor = cipher.decryptor() plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize() plaintext_message = plaintext_message.split(b'\x00')[0] print('result') print(hexlify(plaintext_message).decode('ascii')) try: plaintext_message = plaintext_message.decode('utf-8') except: print('could not decode') return plaintext_message <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def decrypt(message): message = base64.urlsafe_b64decode(message) iv = message[:16] signed_data = message[16:36] encrypted_data = message[36:] cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}") print(f'signed {len(signed_data)} {signed_data}') print( f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}" ) decryptor = cipher.decryptor() plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize() plaintext_message = plaintext_message.split(b'\x00')[0] print('result') print(hexlify(plaintext_message).decode('ascii')) try: plaintext_message = plaintext_message.decode('utf-8') except: print('could not decode') return plaintext_message print(decrypt( 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw==' )) <|reserved_special_token_1|> <|reserved_special_token_0|> backend = default_backend() key = key = b'vcOqXPg==lz3M0IH4swwYCR/'[:16] def decrypt(message): message = base64.urlsafe_b64decode(message) iv = message[:16] signed_data = message[16:36] encrypted_data = message[36:] cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}") print(f'signed {len(signed_data)} {signed_data}') print( f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}" ) decryptor = cipher.decryptor() plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize() plaintext_message = plaintext_message.split(b'\x00')[0] print('result') print(hexlify(plaintext_message).decode('ascii')) try: plaintext_message = plaintext_message.decode('utf-8') except: print('could not decode') return plaintext_message print(decrypt( 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw==' )) <|reserved_special_token_1|> import os import base64 from binascii import hexlify from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives import hashes, hmac from cryptography.hazmat.backends import default_backend backend = default_backend() key = key = b'vcOqXPg==lz3M0IH4swwYCR/'[:16] def decrypt(message): message = base64.urlsafe_b64decode(message) iv = message[:16] signed_data = message[16:36] encrypted_data = 
message[36:] cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}") print(f'signed {len(signed_data)} {signed_data}') print( f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}" ) decryptor = cipher.decryptor() plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize() plaintext_message = plaintext_message.split(b'\x00')[0] print('result') print(hexlify(plaintext_message).decode('ascii')) try: plaintext_message = plaintext_message.decode('utf-8') except: print('could not decode') return plaintext_message print(decrypt( 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw==' )) <|reserved_special_token_1|> import os import base64 from binascii import hexlify from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives import hashes, hmac from cryptography.hazmat.backends import default_backend backend = default_backend() # Llave falsa key = key = b"vcOqXPg==lz3M0IH4swwYCR/"[:16] def decrypt(message): message = base64.urlsafe_b64decode(message) iv = message[:16] signed_data = message[16:36] encrypted_data = message[36:] cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}") print(f"signed {len(signed_data)} {signed_data}") print( f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}" ) decryptor = cipher.decryptor() plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize() # Remove null padding if it exists plaintext_message = plaintext_message.split(b"\x00")[0] print("result") print(hexlify(plaintext_message).decode("ascii")) try: plaintext_message = plaintext_message.decode("utf-8") except: print("could not decode") return plaintext_message print( decrypt( "JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw==" ) )
flexible
{ "blob_id": "c33aedbd5aaa853131c297a9382b72c3c646a319", "index": 4006, "step-1": "<mask token>\n\n\ndef decrypt(message):\n message = base64.urlsafe_b64decode(message)\n iv = message[:16]\n signed_data = message[16:36]\n encrypted_data = message[36:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\n print(f\"iv {len(iv)} {hexlify(iv).decode('ascii')}\")\n print(f'signed {len(signed_data)} {signed_data}')\n print(\n f\"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}\"\n )\n decryptor = cipher.decryptor()\n plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()\n plaintext_message = plaintext_message.split(b'\\x00')[0]\n print('result')\n print(hexlify(plaintext_message).decode('ascii'))\n try:\n plaintext_message = plaintext_message.decode('utf-8')\n except:\n print('could not decode')\n return plaintext_message\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef decrypt(message):\n message = base64.urlsafe_b64decode(message)\n iv = message[:16]\n signed_data = message[16:36]\n encrypted_data = message[36:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\n print(f\"iv {len(iv)} {hexlify(iv).decode('ascii')}\")\n print(f'signed {len(signed_data)} {signed_data}')\n print(\n f\"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}\"\n )\n decryptor = cipher.decryptor()\n plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()\n plaintext_message = plaintext_message.split(b'\\x00')[0]\n print('result')\n print(hexlify(plaintext_message).decode('ascii'))\n try:\n plaintext_message = plaintext_message.decode('utf-8')\n except:\n print('could not decode')\n return plaintext_message\n\n\nprint(decrypt(\n 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw=='\n ))\n", "step-3": "<mask token>\nbackend = default_backend()\nkey = key = b'vcOqXPg==lz3M0IH4swwYCR/'[:16]\n\n\ndef decrypt(message):\n message = base64.urlsafe_b64decode(message)\n iv = message[:16]\n signed_data = message[16:36]\n encrypted_data = message[36:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\n print(f\"iv {len(iv)} {hexlify(iv).decode('ascii')}\")\n print(f'signed {len(signed_data)} {signed_data}')\n print(\n f\"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}\"\n )\n decryptor = cipher.decryptor()\n plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()\n plaintext_message = plaintext_message.split(b'\\x00')[0]\n print('result')\n print(hexlify(plaintext_message).decode('ascii'))\n try:\n plaintext_message = plaintext_message.decode('utf-8')\n except:\n print('could not decode')\n return plaintext_message\n\n\nprint(decrypt(\n 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw=='\n ))\n", "step-4": "import os\nimport base64\nfrom binascii import hexlify\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives import hashes, hmac\nfrom cryptography.hazmat.backends import default_backend\nbackend = default_backend()\nkey = key = 
b'vcOqXPg==lz3M0IH4swwYCR/'[:16]\n\n\ndef decrypt(message):\n message = base64.urlsafe_b64decode(message)\n iv = message[:16]\n signed_data = message[16:36]\n encrypted_data = message[36:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\n print(f\"iv {len(iv)} {hexlify(iv).decode('ascii')}\")\n print(f'signed {len(signed_data)} {signed_data}')\n print(\n f\"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}\"\n )\n decryptor = cipher.decryptor()\n plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()\n plaintext_message = plaintext_message.split(b'\\x00')[0]\n print('result')\n print(hexlify(plaintext_message).decode('ascii'))\n try:\n plaintext_message = plaintext_message.decode('utf-8')\n except:\n print('could not decode')\n return plaintext_message\n\n\nprint(decrypt(\n 'JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw=='\n ))\n", "step-5": "import os\nimport base64\nfrom binascii import hexlify\n\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives import hashes, hmac\nfrom cryptography.hazmat.backends import default_backend\n\n\nbackend = default_backend()\n# Llave falsa\nkey = key = b\"vcOqXPg==lz3M0IH4swwYCR/\"[:16]\n\n\ndef decrypt(message):\n message = base64.urlsafe_b64decode(message)\n iv = message[:16]\n signed_data = message[16:36]\n encrypted_data = message[36:]\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)\n print(f\"iv {len(iv)} {hexlify(iv).decode('ascii')}\")\n print(f\"signed {len(signed_data)} {signed_data}\")\n print(\n f\"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}\"\n )\n decryptor = cipher.decryptor()\n plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()\n # Remove null padding if it exists\n plaintext_message = plaintext_message.split(b\"\\x00\")[0]\n print(\"result\")\n print(hexlify(plaintext_message).decode(\"ascii\"))\n try:\n plaintext_message = plaintext_message.decode(\"utf-8\")\n except:\n print(\"could not decode\")\n return plaintext_message\n\n\nprint(\n decrypt(\n \"JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw==\"\n )\n)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
'''
* @Author: Mohammad Fatha.
* @Date: 2021-09-17 19:50 
* @Last Modified by: Mohammad Fatha
* @Last Modified time: 2021-09-17 19:55
* @Title: Gambler Game
'''
import random
 
def gamblerProblem():
    """
    Description:
    This function simulates a gambler who starts with a stake and places fair $1 bets
    until he/she goes broke (i.e. has no money) or reaches the goal. It keeps track of
    the number of times he/she wins and the number of bets he/she makes, then prints
    the results.
    """
    stake = int(input("Enter The Stake Amount:"))
    goal = int(input("Enter The Amount You Want To Win:"))
    bet_made = int(input("Enter The Number Of Bets You Want To Make:"))
    no_of_times_won = 0
    no_of_time_lost = 0
    no_of_bets_made = 0

    while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:
        no_of_bets_made += 1
        gambler_choice = random.randint(0, 1)  # generates a random number 0 or 1

        if gambler_choice == 1:  # if the random number generated is 1, the bet is won
            no_of_times_won += 1
            stake = stake + 1
        else:  # otherwise the bet is lost
            no_of_time_lost += 1
            stake = stake - 1

    percentage_win = (no_of_times_won / bet_made) * 100
    print("Number Of Times Won", no_of_times_won)
    print("Percentage Of Win", percentage_win)
    print("Percentage Of Loss", 100 - percentage_win)
    print("Number Of Bets Made", no_of_bets_made)


if __name__ == '__main__':
    gamblerProblem()
normal
{ "blob_id": "68904be892968d4a1d82a59a31b95a8133a30832", "index": 8790, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n", "step-4": "<mask token>\nimport random\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. 
Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n", "step-5": "'''\n* @Author: Mohammad Fatha.\n* @Date: 2021-09-17 19:50 \n* @Last Modified by: Mohammad Fatha\n* @Last Modified time: 2021-09-17 19:55\n* @Title: Gambler Game\n'''\nimport random\n \ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake=int(input(\"Enter The The Stake Amount:\"))\n goal=int(input(\"Enter The Amount You Want To Win:\"))\n bet_made=int(input(\"Enter The The Number Of Bets You Want To Make:\"))\n no_of_times_won=0\n no_of_time_lost=0\n no_of_bets_made=0\n\n while(stake >= 0 and stake <= goal and no_of_bets_made < bet_made):\n no_of_bets_made+=1\n gambler_choice=random.randint(0, 1) #generates a random number 0 or 1\n \n if gambler_choice==1: #if the random number generated is 0\n no_of_times_won+=1\n stake=stake+1 \n else:\n no_of_time_lost+=1\n stake=stake-1\n\n percentage_win = (no_of_times_won/bet_made)*100\n print(\"Number Of Times Won\",no_of_times_won)\n print(\"Percentage Of Win\", percentage_win) \n print(\"Percentage Of Loss\", 100-percentage_win)\n print(\"Number Of Bets Made\", no_of_bets_made) \n \n\nif __name__ == '__main__':\n gamblerProblem() ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import psycopg2
from .connection import get_connection

def get_clientes():
    query = 'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes'
    cursor = get_connection(query)
    return cursor

def get_clientes_by_id(_id):
    query = f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE documento = {_id}'
    get_connection(query, _id)

def get_clientes_by_nombre(nombre):
    query = f"SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '%{nombre}%'"
    get_connection(query, nombre)

def get_clientes_by_fecha(fecha):
    query = f"SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '%{fecha}%'"
    get_connection(query, fecha)

def create_pdf():
    pass

def add_cliente(parametros):
    query = 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'
    get_connection(query, parametros)
    print("Datos almacenados")
    get_clientes()


def edit_cliente(_id, parametros):
    query = f'UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, telefono = %s, direccion = %s, correo = %s, ciudad_circulacion = %s WHERE documento = {_id}'
    get_connection(query, parametros)
    print("Datos almacenados")
    get_clientes()

def delete_cliente(_id):
    query = f'DELETE FROM clientes WHERE documento = {_id}'
    get_connection(query)


'''
def delete_cliente():
    #mensaje['text'] = ''
    #try:
    #.tabla.item(.tabla.selection())['text'][0]
    #except IndexError as e:
    # mensaje['text'] = 'Selecciona un producto'
    return
    #mensaje['text'] = ''
    nombre = .tabla.item(.tabla.selection())['text']
    query = 'DELETE FROM clientes WHERE nombre = %s'
    .get_connection(query, (nombre,))
    .mensaje['text'] = 'Cliente {} fue borrado correctamente'.format(nombre)
    .get_clientes()

 def edit_cliente():
    .mensaje['text'] = ''
    try:
        .tabla.item(.tabla.selection())['text'][0]
    except IndexError as e:
        .mensaje['text'] = 'Selecciona un producto'
        return
    nombre = .tabla.item(.tabla.selection())['text']
    documento = .tabla.item(.tabla.selection())['values'][0]
    .ventana_edit = Toplevel()
    .ventana_edit.title = 'Editar cliente'
'''
normal
{ "blob_id": "035a87ccf21d45b2c147da4315c2143bea1ff21d", "index": 8173, "step-1": "<mask token>\n\n\ndef add_cliente(parametros):\n query = (\n 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_clientes_by_nombre(nombre):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {nombre} % ' ')\n get_connection(query, nombre)\n\n\ndef get_clientes_by_fecha(fecha):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {fecha} % ' ')\n get_connection(query, fecha)\n\n\n<mask token>\n\n\ndef add_cliente(parametros):\n query = (\n 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef edit_cliente(_id, parametros):\n query = (\n f'UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, telefono = %s, direccion = %s, correo = %s, ciudad_circulacion WHERE documento = {_id}'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef delete_cliente(_id):\n query = 'DELETE FROM clientes WHERE documento = {_id}'\n get_connection(query)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_clientes():\n query = (\n 'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes'\n )\n cursor = get_connection(query)\n return cursor\n\n\ndef get_clientes_by_id(_id):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE documento = {_id}'\n )\n get_connection(query, _id)\n\n\ndef get_clientes_by_nombre(nombre):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {nombre} % ' ')\n get_connection(query, nombre)\n\n\ndef get_clientes_by_fecha(fecha):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {fecha} % ' ')\n get_connection(query, fecha)\n\n\n<mask token>\n\n\ndef add_cliente(parametros):\n query = (\n 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef edit_cliente(_id, parametros):\n query = (\n f'UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, telefono = %s, direccion = %s, correo = %s, ciudad_circulacion WHERE documento = {_id}'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef delete_cliente(_id):\n query = 'DELETE FROM clientes WHERE documento = {_id}'\n get_connection(query)\n\n\n<mask token>\n", "step-4": "import psycopg2\nfrom .connection import get_connection\n\n\ndef get_clientes():\n query = (\n 'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes'\n )\n cursor = get_connection(query)\n return cursor\n\n\ndef 
get_clientes_by_id(_id):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE documento = {_id}'\n )\n get_connection(query, _id)\n\n\ndef get_clientes_by_nombre(nombre):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {nombre} % ' ')\n get_connection(query, nombre)\n\n\ndef get_clientes_by_fecha(fecha):\n query = (\n f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '\n % {fecha} % ' ')\n get_connection(query, fecha)\n\n\ndef create_pdf():\n pass\n\n\ndef add_cliente(parametros):\n query = (\n 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef edit_cliente(_id, parametros):\n query = (\n f'UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, telefono = %s, direccion = %s, correo = %s, ciudad_circulacion WHERE documento = {_id}'\n )\n get_connection(query, parametros)\n print('Datos almacenados')\n get_clientes()\n\n\ndef delete_cliente(_id):\n query = 'DELETE FROM clientes WHERE documento = {_id}'\n get_connection(query)\n\n\n<mask token>\n", "step-5": "import psycopg2\nfrom .connection import get_connection\n\ndef get_clientes():\n query = 'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes'\n cursor = get_connection(query)\n return cursor\n\ndef get_clientes_by_id(_id):\n query = f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE documento = {_id}'\n get_connection(query, _id)\n\ndef get_clientes_by_nombre(nombre):\n query = f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '%{nombre}%' '\n get_connection(query, nombre)\n \ndef get_clientes_by_fecha(fecha):\n query = f'SELECT nombre, t_documento ,documento, telefono, direccion, correo, ciudad_circulacion, fecha_nacimiento, comercial, primas FROM clientes WHERE nombre LIKE '%{fecha}%' '\n get_connection(query, fecha)\n \ndef create_pdf():\n pass\n \ndef add_cliente(parametros):\n query = 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)' \n get_connection(query, parametros)\n print(\"Datos almacenados\") \n get_clientes()\n \n\n\n\ndef edit_cliente(_id, parametros):\n query = f'UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, telefono = %s, direccion = %s, correo = %s, ciudad_circulacion WHERE documento = {_id}' \n get_connection(query, parametros)\n print(\"Datos almacenados\") \n get_clientes() \n \ndef delete_cliente(_id):\n query = 'DELETE FROM clientes WHERE documento = {_id}'\n get_connection(query)\n \n\n\n'''\ndef delete_cliente():\n #mensaje['text'] = ''\n #try:\n #.tabla.item(.tabla.selection())['text'][0]\n #except IndexError as e:\n # mensaje['text'] = 'Selecciona un producto'\n return\n #mensaje['text'] = ''\n nombre = .tabla.item(.tabla.selection())['text']\n query = 'DELETE FROM clientes WHERE nombre = %s'\n .get_connection(query, (nombre,))\n .mensaje['text'] = 'Cliente {} fue borrado correctamente'.format(nombre)\n 
.get_clientes()\n\n def edit_cliente():\n .mensaje['text'] = ''\n try:\n .tabla.item(.tabla.selection())['text'][0]\n except IndexError as e:\n .mensaje['text'] = 'Selecciona un producto'\n return\n nombre = .tabla.item(.tabla.selection())['text']\n documento = .tabla.item(.tabla.selection())['values'][0]\n .ventana_edit = Toplevel()\n .ventana_edit.title = 'Editar cliente'\n'''", "step-ids": [ 1, 5, 7, 9, 10 ] }
[ 1, 5, 7, 9, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Glo_EstadoPlan(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Glo_EstadoPlan(models.Model): descripcion_estado = models.CharField(max_length=100) def __str__(self): return '{}'.format(self.descripcion_estado) <|reserved_special_token_1|> from django.db import models class Glo_EstadoPlan(models.Model): descripcion_estado = models.CharField(max_length=100) def __str__(self): return '{}'.format(self.descripcion_estado) <|reserved_special_token_1|> from django.db import models # Create your models here. class Glo_EstadoPlan(models.Model): descripcion_estado = models.CharField(max_length=100) def __str__(self): return '{}'.format(self.descripcion_estado)
flexible
{ "blob_id": "b0a51877b59e14eefdd662bac468e8ce12343e6b", "index": 3885, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Glo_EstadoPlan(models.Model):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Glo_EstadoPlan(models.Model):\n descripcion_estado = models.CharField(max_length=100)\n\n def __str__(self):\n return '{}'.format(self.descripcion_estado)\n", "step-4": "from django.db import models\n\n\nclass Glo_EstadoPlan(models.Model):\n descripcion_estado = models.CharField(max_length=100)\n\n def __str__(self):\n return '{}'.format(self.descripcion_estado)\n", "step-5": "from django.db import models\r\n\r\n# Create your models here.\r\nclass Glo_EstadoPlan(models.Model):\r\n descripcion_estado = models.CharField(max_length=100)\r\n\r\n def __str__(self):\r\n return '{}'.format(self.descripcion_estado)", "step-ids": [ 0, 1, 3, 4, 5 ] }
[ 0, 1, 3, 4, 5 ]
<|reserved_special_token_0|> class UserViewSet(viewsets.ModelViewSet): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class UserViewSet(viewsets.ModelViewSet): <|reserved_special_token_0|> queryset = UserCustom.objects.all() serializer_class = UserSerializer <|reserved_special_token_1|> <|reserved_special_token_0|> class UserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = UserCustom.objects.all() serializer_class = UserSerializer <|reserved_special_token_1|> from rest_framework import viewsets from .serializers import UserSerializer from .models import UserCustom class UserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = UserCustom.objects.all() serializer_class = UserSerializer
flexible
{ "blob_id": "fadf16792822926cb7b7386291e52ce44693baf8", "index": 2053, "step-1": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n <mask token>\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n", "step-3": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n", "step-4": "from rest_framework import viewsets\nfrom .serializers import UserSerializer\nfrom .models import UserCustom\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import gym import random import numpy as np import statistics from collections import Counter import tflearn from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.estimator import regression #setup the Cartpole environment env = gym.make("CartPole-v0") env.reset() #----------Explore CartPole-------------# #exploring the observations, rewards, actions def explore_cartpole(): for i_episode in range(2): observation = env.reset() for t in range(100): env.render() print(observation) action = env.action_space.sample() observation, reward, done, info = env.step(action) print("Action: ", action, "Rewards", reward) if done: print("Episode finished after {} timesteps".format(t+1)) break #explore_cartpole() #----------Collect Training Data-------------# #collect data from successful games by running x games #successful would be say, lasting more than 100 frames num_games = 20000 num_episodes = 201 #game would end at 200 episodes min_score = 75 def initial_games(): train_data = [] train_scores = [] #running our initial set of games for _ in range(num_games): game_data = [] prev_obs = [] score = 0 #running the game, frame by frame for _ in range(num_episodes): #choosing actions: randomly action = random.randrange(0,2) observation, reward, done, info = env.step(action) if len(prev_obs) > 0: game_data.append([prev_obs, action]) prev_obs = observation score += reward if done: #print("Score was: ", score) break #if the score was above the threshold #we will save the game in our training data #hence training on the better games if score >= min_score : train_scores.append(score) #converting the data into one-hot output for i in game_data: if i[1] == 0: output = [1, 0] else: output = [0, 1] train_data.append([i[0], output]) env.reset() return train_data #----------Build the FC NN model-------------# #building a simple multi-layer fully connected model #this model can be generally used to play games like cartpole #would try training the model on other games in OpenAI environment def nn_model(input_size): network = input_data(shape=[None, input_size, 1], name='input') network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 256, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 512, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 256, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 128, activation='relu') network = dropout(network, 0.8) network = fully_connected(network, 2, activation='softmax') network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets') model = tflearn.DNN(network, tensorboard_dir='log') return model #----------Train the model-------------# def train_model(train_data, model=False): x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][0]),1) y = [i[1] for i in train_data] if not model: model = nn_model(input_size = len(x[0])) model.fit({'input': x}, {'targets': y}, n_epoch = 5, snapshot_step=500, show_metric = True, run_id = 'openai_learning') return model train_data = initial_games() #print("Size of training data",len(train_data)) model = train_model(train_data) #----------Predict actions for the games-------------# num_final_games = 10 target_episodes = 201 all_rewards = [] all_actions = [] for _ in range(num_final_games): total_score = 0 prev_obs = [] env.reset() for _ in range(target_episodes): 
#env.render() #instead of randomly choosing the action, predict the actions if len(prev_obs) == 0: action = random.randrange(0,2) else: action = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0]) all_actions.append(action) #let's run the game observation, reward, done, info = env.step(action) prev_obs = observation total_score += reward if done: break all_rewards.append(total_score) #----------Print results-------------# print('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards)) print('Max reward:', max(all_rewards))
normal
{ "blob_id": "7789e54acc02fe0277ff80ce14efbcdc4ee6e7f1", "index": 8009, "step-1": "<mask token>\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\n<mask token>\n", "step-2": "<mask token>\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\n<mask token>\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 
1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n", "step-3": "<mask token>\nenv = gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n", "step-4": "import gym\nimport random\nimport numpy as np\nimport statistics\nfrom collections import Counter\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nenv = 
gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n", "step-5": "import gym\nimport random \nimport numpy as np\nimport statistics\nfrom collections import Counter\n\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\n\n#setup the Cartpole environment\nenv = gym.make(\"CartPole-v0\")\nenv.reset()\n\n\n#----------Explore CartPole-------------#\n#exploring the observations, rewards, actions\ndef explore_cartpole():\n\tfor i_episode in range(2):\n\t observation = env.reset()\n\t for t in range(100):\n\t env.render()\n\t print(observation)\n\t action = 
env.action_space.sample()\n\t observation, reward, done, info = env.step(action)\n\t print(\"Action: \", action, \"Rewards\", reward)\n\t if done:\n\t print(\"Episode finished after {} timesteps\".format(t+1))\n\t break\n\n#explore_cartpole() \n\n#----------Collect Training Data-------------#\n#collect data from successful games by running x games\n#successful would be say, lasting more than 100 frames\nnum_games = 20000\nnum_episodes = 201 #game would end at 200 episodes\nmin_score = 75\n\ndef initial_games():\n\n\ttrain_data = []\n\ttrain_scores = []\n\n\t#running our initial set of games\n\tfor _ in range(num_games):\n\t\tgame_data = []\n\t\tprev_obs = []\n\t\tscore = 0\n\n\t\t#running the game, frame by frame\n\t\tfor _ in range(num_episodes):\n\t\t\t#choosing actions: randomly\n\t\t\taction = random.randrange(0,2)\n\t\t\tobservation, reward, done, info = env.step(action)\n\n\t\t\tif len(prev_obs) > 0: \n\t\t\t\tgame_data.append([prev_obs, action])\n\n\t\t\tprev_obs = observation\n\t\t\tscore += reward\n\n\t\t\tif done:\n\t\t\t\t#print(\"Score was: \", score)\n\t\t\t\tbreak\n\n\t\t#if the score was above the threshold\n\t\t#we will save the game in our training data\n\t\t#hence training on the better games\n\t\tif score >= min_score :\n\t\t\ttrain_scores.append(score)\n\t\t\t#converting the data into one-hot output\t\t\n\t\t\tfor i in game_data:\t\t\t\n\t\t\t\tif i[1] == 0:\n\t\t\t\t\toutput = [1, 0]\n\t\t\t\telse:\n\t\t\t\t\toutput = [0, 1]\n\t\t\t\t\n\t\t\t\ttrain_data.append([i[0], output])\n\n\t\tenv.reset()\n\n\treturn train_data\n\n\n#----------Build the FC NN model-------------#\n#building a simple multi-layer fully connected model\n#this model can be generally used to play games like cartpole\n#would try training the model on other games in OpenAI environment\n\ndef nn_model(input_size):\n\n network = input_data(shape=[None, input_size, 1], name='input')\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n\n return model\n\n\n\n#----------Train the model-------------#\ndef train_model(train_data, model=False):\n\n\tx = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][0]),1)\n\ty = [i[1] for i in train_data]\n\n\tif not model:\n\t\tmodel = nn_model(input_size = len(x[0]))\n\n\tmodel.fit({'input': x}, {'targets': y}, n_epoch = 5, snapshot_step=500, \n\t\tshow_metric = True, run_id = 'openai_learning')\n\treturn model\n\ntrain_data = initial_games()\n#print(\"Size of training data\",len(train_data))\n\nmodel = train_model(train_data)\n\n#----------Predict actions for the games-------------#\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\n\nfor _ in range(num_final_games):\n\ttotal_score = 0\n\tprev_obs = []\n\tenv.reset()\n\n\tfor _ in range(target_episodes):\n\n\t\t#env.render()\n\n\t\t#instead of randomly choosing the action, predict the actions\n\t\tif len(prev_obs) == 
0:\n\t\t\taction = random.randrange(0,2)\n\t\telse:\n\t\t\taction = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0])\n\t\t\n\t\tall_actions.append(action)\n\n\t\t#let's run the game\n\t\tobservation, reward, done, info = env.step(action)\n\t\t\n\t\tprev_obs = observation\n\t\ttotal_score += reward\n\n\t\tif done: \n\t\t\tbreak\n\n\tall_rewards.append(total_score)\n\n#----------Print results-------------#\nprint('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n", "step-ids": [ 2, 5, 6, 7, 8 ] }
[ 2, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def usage_list(self): print('Available modules') print('=================') for module in sorted(self.list()): if 'module' not in self.mods[module]: self.import_module(module) if not self.mods[module]['module'].__doc__: continue text = self.mods[module]['module'].__doc__.strip('\n ') text = text.split('\n') if len(text) > 2: if text[1].startswith('='): text[1] = '=' * (14 + len(text[1])) text = '\n'.join(text) print('\n%-12s: %s' % (module, text)) <|reserved_special_token_1|> def usage_list(self): print('Available modules') print('=================') for module in sorted(self.list()): if ('module' not in self.mods[module]): self.import_module(module) if (not self.mods[module]['module'].__doc__): continue text = self.mods[module]['module'].__doc__.strip('\n ') text = text.split('\n') if (len(text) > 2): if text[1].startswith('='): text[1] = ('=' * (14 + len(text[1]))) text = '\n'.join(text) print(('\n%-12s: %s' % (module, text)))
flexible
{ "blob_id": "d0eb6ea2e816ac59ae93684edb38ff3a49909633", "index": 762, "step-1": "<mask token>\n", "step-2": "def usage_list(self):\n print('Available modules')\n print('=================')\n for module in sorted(self.list()):\n if 'module' not in self.mods[module]:\n self.import_module(module)\n if not self.mods[module]['module'].__doc__:\n continue\n text = self.mods[module]['module'].__doc__.strip('\\n ')\n text = text.split('\\n')\n if len(text) > 2:\n if text[1].startswith('='):\n text[1] = '=' * (14 + len(text[1]))\n text = '\\n'.join(text)\n print('\\n%-12s: %s' % (module, text))\n", "step-3": "def usage_list(self):\n print('Available modules')\n print('=================')\n for module in sorted(self.list()):\n if ('module' not in self.mods[module]):\n self.import_module(module)\n if (not self.mods[module]['module'].__doc__):\n continue\n text = self.mods[module]['module'].__doc__.strip('\\n ')\n text = text.split('\\n')\n if (len(text) > 2):\n if text[1].startswith('='):\n text[1] = ('=' * (14 + len(text[1])))\n text = '\\n'.join(text)\n print(('\\n%-12s: %s' % (module, text)))", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class assignmentObject: <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class assignmentObject: def __init__(self, name, day): self.name = name self.day = day <|reserved_special_token_1|> import datetime class assignmentObject: def __init__(self, name, day): self.name = name self.day = day
flexible
{ "blob_id": "1673214215043644e1a878ed7c30b69064f1a022", "index": 5375, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass assignmentObject:\n <mask token>\n", "step-3": "<mask token>\n\n\nclass assignmentObject:\n\n def __init__(self, name, day):\n self.name = name\n self.day = day\n", "step-4": "import datetime\n\n\nclass assignmentObject:\n\n def __init__(self, name, day):\n self.name = name\n self.day = day\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = [url('^send_message$', send_message, name='send_message'), url('^$', index, name='index')] <|reserved_special_token_1|> from django.conf.urls import url from .views import index, send_message urlpatterns = [url('^send_message$', send_message, name='send_message'), url('^$', index, name='index')] <|reserved_special_token_1|> from django.conf.urls import url #from .views import CommandReceiveView from .views import index, send_message urlpatterns = [ #url(r'^bot/(?P<bot_token>.+)/$', CommandReceiveView.as_view(), name='command'), url(r'^send_message$', send_message, name='send_message'), url(r'^$', index, name='index'), ]
flexible
{ "blob_id": "6cc56f73e58366a3906da537cc27fdd5a066ee34", "index": 2647, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [url('^send_message$', send_message, name='send_message'),\n url('^$', index, name='index')]\n", "step-3": "from django.conf.urls import url\nfrom .views import index, send_message\nurlpatterns = [url('^send_message$', send_message, name='send_message'),\n url('^$', index, name='index')]\n", "step-4": "from django.conf.urls import url\n\n#from .views import CommandReceiveView\nfrom .views import index, send_message\n\nurlpatterns = [\n #url(r'^bot/(?P<bot_token>.+)/$', CommandReceiveView.as_view(), name='command'),\n url(r'^send_message$', send_message, name='send_message'),\n url(r'^$', index, name='index'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
quilogramas = float ( input ( "Insira o peso em Kg:" ))
libras = quilogramas / 0 , 45
print ( libras )
normal
{ "blob_id": "9c35e64fd773c79dc20e6b388478e892bda85788", "index": 1599, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(libras)\n", "step-3": "quilogramas = float(input('Insira o peso em Kg:'))\nlibras = quilogramas / 0, 45\nprint(libras)\n", "step-4": "quilogramas = float ( input ( \"Insira o peso em Kg:\" ))\nlibras = quilogramas / 0 , 45\nprint ( libras )", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from pypack.Animal import Animal

__author__ = 'igord'


def nl():
    print("\n")

def main():
    # print("Hello2")
    # animal = Animal(45)
    # animal.double_age()
    # print(animal.age)

    print("Start")

    msg = "ana i mujica"
    msg2 = msg.replace("a", "$")
    print(msg)
    print(msg2)
    ivana = "ivana"
    print(ivana * 2)

    # print(sys.api_version)

    fruit = ["banana", "apple", "legit"]
    for i in range(len(fruit)):
        # pass
        # sys.stdout.write("test")
        # print("test", end="")
        print(fruit[i], end="")
    nl()

    print([fruit[0], fruit[1]])
    print([fruit[0], fruit[1]], sep="$")

    animal1 = Animal(30)
    print(animal1)

    nl()
    print("End")

main()
normal
{ "blob_id": "b0cdf75ff00d72ada75990dd850546414bc11125", "index": 1799, "step-1": "<mask token>\n\n\ndef nl():\n print('\\n')\n\n\ndef main():\n print('Start')\n msg = 'ana i mujica'\n msg2 = msg.replace('a', '$')\n print(msg)\n print(msg2)\n ivana = 'ivana'\n print(ivana * 2)\n fruit = ['banana', 'apple', 'legit']\n for i in range(len(fruit)):\n print(fruit[i], end='')\n nl()\n print([fruit[0], fruit[1]])\n print([fruit[0], fruit[1]], sep='$')\n animal1 = Animal(30)\n print(animal1)\n nl()\n print('End')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef nl():\n print('\\n')\n\n\ndef main():\n print('Start')\n msg = 'ana i mujica'\n msg2 = msg.replace('a', '$')\n print(msg)\n print(msg2)\n ivana = 'ivana'\n print(ivana * 2)\n fruit = ['banana', 'apple', 'legit']\n for i in range(len(fruit)):\n print(fruit[i], end='')\n nl()\n print([fruit[0], fruit[1]])\n print([fruit[0], fruit[1]], sep='$')\n animal1 = Animal(30)\n print(animal1)\n nl()\n print('End')\n\n\nmain()\n", "step-3": "<mask token>\n__author__ = 'igord'\n\n\ndef nl():\n print('\\n')\n\n\ndef main():\n print('Start')\n msg = 'ana i mujica'\n msg2 = msg.replace('a', '$')\n print(msg)\n print(msg2)\n ivana = 'ivana'\n print(ivana * 2)\n fruit = ['banana', 'apple', 'legit']\n for i in range(len(fruit)):\n print(fruit[i], end='')\n nl()\n print([fruit[0], fruit[1]])\n print([fruit[0], fruit[1]], sep='$')\n animal1 = Animal(30)\n print(animal1)\n nl()\n print('End')\n\n\nmain()\n", "step-4": "from pypack.Animal import Animal\n__author__ = 'igord'\n\n\ndef nl():\n print('\\n')\n\n\ndef main():\n print('Start')\n msg = 'ana i mujica'\n msg2 = msg.replace('a', '$')\n print(msg)\n print(msg2)\n ivana = 'ivana'\n print(ivana * 2)\n fruit = ['banana', 'apple', 'legit']\n for i in range(len(fruit)):\n print(fruit[i], end='')\n nl()\n print([fruit[0], fruit[1]])\n print([fruit[0], fruit[1]], sep='$')\n animal1 = Animal(30)\n print(animal1)\n nl()\n print('End')\n\n\nmain()\n", "step-5": "from pypack.Animal import Animal\n\n__author__ = 'igord'\n\n\ndef nl():\n print(\"\\n\")\n\ndef main():\n # print(\"Hello2\")\n # animal = Animal(45)\n # animal.double_age()\n # print(animal.age)\n\n print(\"Start\")\n\n msg = \"ana i mujica\"\n msg2 = msg.replace(\"a\", \"$\")\n print(msg)\n print(msg2)\n ivana = \"ivana\"\n print(ivana * 2)\n\n # print(sys.api_version)\n\n fruit = [\"banana\", \"apple\", \"legit\"]\n for i in range(len(fruit)):\n # pass\n # sys.stdout.write(\"test\")\n # print(\"test\", end=\"\")\n print(fruit[i], end=\"\")\n nl()\n\n print([fruit[0], fruit[1]])\n print([fruit[0], fruit[1]], sep=\"$\")\n\n animal1 = Animal(30)\n print(animal1)\n\n nl()\n print(\"End\")\n\nmain()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# Importing the random library for random choice.
import random
getnum = int(input("Pick a number greater than 7: "))
# Error checking.
if (getnum < 7):
    print("Error 205: Too little characters entered")
    print("Run again using python passwordgenerator.py, or click the run button on your IDE.")
    exit()
# A list of random things.
lista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\']
# Main function takes two params, lista and get num.
def main(lista, getnum):
    password = ''
    for i in range(0, getnum):
        passchar = random.choice(lista)
        password = password + passchar
    print(password)
    passwordagain()
#Password again.
def passwordagain():
    again = input("Do you want to generate another password(y/n)?: ")
    if (again == 'y'):
        main(lista,getnum)
    elif(again == 'n'):
        exit()
    else:
        print("Sorry, couldn't understand what you were saying.")
        passwordagain()
main(lista, getnum)
normal
{ "blob_id": "c40bb410ad68808c2e0cc636820ec6a2ec2739b8", "index": 4053, "step-1": "<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\n<mask token>\n", "step-2": "<mask token>\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\n<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n", "step-3": "<mask token>\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n", "step-4": "import random\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n", "step-5": "# Importing the random library for random choice.\nimport random\ngetnum = int(input(\"Pick a number greater than 7: \"))\n# Error checking.\nif (getnum < 7):\n print(\"Error 205: Too little characters entered\")\n print(\"Run again using python passwordgenerator.py, or click the run button on your IDE.\")\n exit()\n# A list of random things.\nlista = 
['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\\\']\n# Main function takes two params, lista and get num.\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n#Password again.\ndef passwordagain():\n again = input(\"Do you want to generate another password(y/n)?: \")\n if (again == 'y'):\n main(lista,getnum)\n elif(again == 'n'):\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\nmain(lista, getnum)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(filtered_words) <|reserved_special_token_0|> print(' '.join(singles)) <|reserved_special_token_1|> stop_words = ['the', 'an', 'is', 'there'] word_list = ['we', 'are', 'the', 'students'] filtered_words = [word for word in word_list if word not in stop_words] print(filtered_words) <|reserved_special_token_0|> cachedStopWords = stopwords.words('english') <|reserved_special_token_0|> stemmer = PorterStemmer() test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died', 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing', 'itemization', 'sensational', 'traditional', 'reference', 'colonizer', 'plotted'] singles = [stemmer.stem(word) for word in test_strs] print(' '.join(singles)) <|reserved_special_token_1|> stop_words = ['the', 'an', 'is', 'there'] word_list = ['we', 'are', 'the', 'students'] filtered_words = [word for word in word_list if word not in stop_words] print(filtered_words) from nltk.corpus import stopwords cachedStopWords = stopwords.words('english') from nltk.stem.porter import * stemmer = PorterStemmer() test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died', 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing', 'itemization', 'sensational', 'traditional', 'reference', 'colonizer', 'plotted'] singles = [stemmer.stem(word) for word in test_strs] print(' '.join(singles)) <|reserved_special_token_1|> # 出现频率特别高的和频率特别低的词对于文本分析帮助不大,一般在预处理阶段会过滤掉。 # 在英文里,经典的停用词为 “The”, "an".... # 方法1: 自己建立一个停用词词典 stop_words = ["the", "an", "is", "there"] # 在使用时: 假设 word_list包含了文本里的单词 word_list = ["we", "are", "the", "students"] filtered_words = [word for word in word_list if word not in stop_words] print (filtered_words) # 方法2:直接利用别人已经构建好的停用词库 from nltk.corpus import stopwords cachedStopWords = stopwords.words("english") from nltk.stem.porter import * stemmer = PorterStemmer() test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died', 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing', 'itemization', 'sensational', 'traditional', 'reference', 'colonizer', 'plotted'] singles = [stemmer.stem(word) for word in test_strs] print(' '.join(singles)) # doctest: +NORMALIZE_WHITESPACE
flexible
{ "blob_id": "d14937aaa7a80d6b95825afa2a2d6ff8202e5f5c", "index": 2498, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(filtered_words)\n<mask token>\nprint(' '.join(singles))\n", "step-3": "stop_words = ['the', 'an', 'is', 'there']\nword_list = ['we', 'are', 'the', 'students']\nfiltered_words = [word for word in word_list if word not in stop_words]\nprint(filtered_words)\n<mask token>\ncachedStopWords = stopwords.words('english')\n<mask token>\nstemmer = PorterStemmer()\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',\n 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',\n 'itemization', 'sensational', 'traditional', 'reference', 'colonizer',\n 'plotted']\nsingles = [stemmer.stem(word) for word in test_strs]\nprint(' '.join(singles))\n", "step-4": "stop_words = ['the', 'an', 'is', 'there']\nword_list = ['we', 'are', 'the', 'students']\nfiltered_words = [word for word in word_list if word not in stop_words]\nprint(filtered_words)\nfrom nltk.corpus import stopwords\ncachedStopWords = stopwords.words('english')\nfrom nltk.stem.porter import *\nstemmer = PorterStemmer()\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',\n 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',\n 'itemization', 'sensational', 'traditional', 'reference', 'colonizer',\n 'plotted']\nsingles = [stemmer.stem(word) for word in test_strs]\nprint(' '.join(singles))\n", "step-5": "# 出现频率特别高的和频率特别低的词对于文本分析帮助不大,一般在预处理阶段会过滤掉。 \r\n# 在英文里,经典的停用词为 “The”, \"an\"....\r\n\r\n# 方法1: 自己建立一个停用词词典\r\nstop_words = [\"the\", \"an\", \"is\", \"there\"]\r\n# 在使用时: 假设 word_list包含了文本里的单词\r\nword_list = [\"we\", \"are\", \"the\", \"students\"]\r\nfiltered_words = [word for word in word_list if word not in stop_words]\r\nprint (filtered_words)\r\n\r\n# 方法2:直接利用别人已经构建好的停用词库\r\nfrom nltk.corpus import stopwords\r\ncachedStopWords = stopwords.words(\"english\")\r\n\r\nfrom nltk.stem.porter import *\r\nstemmer = PorterStemmer()\r\n\r\ntest_strs = ['caresses', 'flies', 'dies', 'mules', 'denied',\r\n 'died', 'agreed', 'owned', 'humbled', 'sized',\r\n 'meeting', 'stating', 'siezing', 'itemization',\r\n 'sensational', 'traditional', 'reference', 'colonizer',\r\n 'plotted']\r\n\r\nsingles = [stemmer.stem(word) for word in test_strs]\r\nprint(' '.join(singles)) # doctest: +NORMALIZE_WHITESPACE", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
""" Python asyncio Protocol extension for TCP use. """ import asyncio import logging import socket class TcpTestProtocol(asyncio.Protocol): """ Extension of asyncio protocol for TCP data """ def __init__(self, test_stream=None, no_delay=False, window=None, server=None): """ Initialize TCP Protocol object. """ self._transport = None self._socket = None self._stream = test_stream self._logger = logging.getLogger('py3iperf3') self._sock_id = None self._no_delay = no_delay self._window = window self._server = server @property def socket_id(self): """Return socket id""" return self._sock_id def set_owner(self, owner, is_stream=False): """Update owner to test from server once ready""" if is_stream: self._logger.debug('TCP Proto Stream is set!') self._stream = owner else: self._server = owner def connection_made(self, transport): """Connection established call-back""" self._transport = transport self._socket = transport.get_extra_info('socket') self._sock_id = self._socket.fileno() if self._server is None: # This is client connecting to the server self.connection_to_server_made(transport) else: # This is incomming connection from the client self.connection_from_client(transport) def connection_from_client(self, transport): """Connection from the client established to the server""" peer_data = transport.get_extra_info('peername') self._logger.info('[%s] incomming connection from %s port %s', self._sock_id, peer_data[0], peer_data[1]) self._server.tcp_connection_established(self) def connection_to_server_made(self, transport): """Connecton to the server established""" local_data = self._socket.getsockname() peer_data = transport.get_extra_info('peername') self._logger.info('[%s] local %s:%s connected to %s:%s', self._sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1]) # No delay OFF -> Nagle's alg used self._socket.setsockopt( socket.IPPROTO_TCP, socket.TCP_NODELAY, 0) # If required - turn off Nagle's alg (No Delay ON) if self._no_delay: self._socket.setsockopt( socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # Set Socket TX/RX buffer sizes if specified if self._window: self._logger.debug('Setting socket buffer sizes to %s B', self._window) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window) # Print current buf sizes: rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf) self._stream.connection_established(self) def data_received(self, data): """ Data received call-back. """ # Inform the server that we have data until the stream is ready if self._stream is None: self._server.control_data_received(self, data) else: self._stream.data_received(data) def connection_lost(self, exc): """ Callback on connection lost. """ if self._stream.done: # Stream is done, no need to panic pass else: self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc) def send_data(self, data): """ Write data to transport. """ self._transport.write(data) def pause_writing(self): """ Pause writing callback from transport. """ self._stream.pause_writing() def resume_writing(self): """ Resume writing callback from transport. """ self._stream.resume_writing()
normal
{ "blob_id": "9f0e286268732e8cabb028b7c84f5ba72a6e8528", "index": 3068, "step-1": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n <mask token>\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = 
transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n <mask token>\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n", "step-3": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def 
connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n", "step-4": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n \"\"\"\n Extension of asyncio protocol for TCP data\n \"\"\"\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], 
peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n", "step-5": "\"\"\"\nPython asyncio Protocol extension for TCP use.\n\"\"\"\nimport asyncio\nimport logging\nimport socket\n\nclass TcpTestProtocol(asyncio.Protocol):\n \"\"\"\n Extension of asyncio protocol for TCP data\n \"\"\"\n\n def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n\n if self._server is None:\n # This is client connecting to the server\n self.connection_to_server_made(transport)\n else:\n # This is incomming connection from the client\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s',\n self._sock_id, peer_data[0], peer_data[1])\n\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n\n self._logger.info('[%s] local %s:%s connected to %s:%s',\n self._sock_id, local_data[0], local_data[1],\n peer_data[0], peer_data[1])\n\n # No delay OFF -> Nagle's alg used\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 
0)\n\n # If required - turn off Nagle's alg (No Delay ON)\n if self._no_delay:\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 1)\n\n # Set Socket TX/RX buffer sizes if specified\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window)\n\n # Print current buf sizes:\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf)\n\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n # Inform the server that we have data until the stream is ready\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()", "step-ids": [ 9, 11, 12, 13, 15 ] }
[ 9, 11, 12, 13, 15 ]
# -*- coding: utf-8 -*-

"""Testing constants for Bio2BEL FlyBase."""

import logging
import os

log = logging.getLogger(__name__)

dir_path = os.path.dirname(os.path.realpath(__file__))

TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
normal
{ "blob_id": "bad719d968b4e358f863b7ef13bc12127f726806", "index": 682, "step-1": "<mask token>\n", "step-2": "<mask token>\nlog = logging.getLogger(__name__)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n", "step-3": "<mask token>\nimport logging\nimport os\nlog = logging.getLogger(__name__)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n", "step-4": "# -*- coding: utf-8 -*-\n\n\"\"\"Testing constants for Bio2BEL FlyBase.\"\"\"\n\nimport logging\nimport os\n\nlog = logging.getLogger(__name__)\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nTEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
x = 1
while x <= 24:
    if x % 5 == 0:
        x = x + 1
        continue
    print(x)
    x = x + 1
normal
{ "blob_id": "61cfc583cd87ac0528cb07f4e051392167414920", "index": 1960, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile x <= 24:\n if x % 5 == 0:\n x = x + 1\n continue\n print(x)\n x = x + 1\n", "step-3": "x = 1\nwhile x <= 24:\n if x % 5 == 0:\n x = x + 1\n continue\n print(x)\n x = x + 1\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# created by ahmad on 17-07-2019 # last updated on 21-07-2019 #recommended font size of console in pydroid is 12 from decimal import Decimal def fromTen(): global fin fin = num nnum = num base = base2 if count == 1: nnum = sum(milst) + sum(mdlst) Ipart = int(nnum) Dpart = Decimal(nnum - Ipart) strDpart = str(Dpart) Ilist = [] Dlist = [] print("digits before . (dot) is {} ".format(Ipart)) if strDpart == "0": print("digits after . (dot) is 0") else: print("digits after . (dot) is {}".format(strDpart[2:])) print(" --------------------------------------------------") print("| INTEGRAL PART |") print(" --------------------------------------------------") print(" {}|_{}".format(base, Ipart)) while nnum >= base: rem = int(nnum % base) srem = str(rem) nnum = int(nnum / base) Ilist.append(rem) if nnum >= base: print(" {}|_".format(base) + str(nnum) + " --->{}".format(srem)) else: print(" " + str(nnum) + " --->{}".format(srem)) Ilist.append(nnum) print(" --------------------------------------------------") IIlist = Ilist for i in range(len(IIlist)): try: a = int(IIlist[i]) + 55 if a > 64: IIlist[i] = chr(a) except: pass print(Ilist[::-1]) print() print(" --------------------------------------------------") print("| DECIMAL PART |") print(" --------------------------------------------------") k = 0 while k < (len(strDpart) - 2) * 2: print("{} x {} = ".format(Dpart, base), end='') a = Dpart * base Dpart = a - int(a) print(a) a1 = int(a) Dlist.append(a1) k = k + 1 print(" --------------------------------------------------") print("integer part:") print(Ilist[::-1]) print("decimal part:") print(Dlist) dot = ["."] y=Ilist[::-1] y1=y+dot+ Dlist for i in range(len(y1)): y1[i]=str(y1[i]) print("Final Answer = ",'(' ,''.join(y1),')','base',base2) def toTen(): mnum = num mbase = base1 global fin mdnum = mnum - int(mnum) minum = int(mnum) strmdnum = str(mdnum)[2:] mdlen = len(strmdnum) strminum = str(minum)[::-1] milen = len(strminum) strnum = strmdnum + strminum con = 0 for i in range(len(strnum)): a = int(strnum[i]) if a >= mbase: con = con + 1 if con == 0: p = 0 global milst, mdlst milst = [] mdlst = [] print(" --------------------------------------------------") print("| INTEGRAL PART |") print(" --------------------------------------------------") for ii in range(milen): minum = int(strminum[ii]) power1 = pow(mbase, p) print("""{} power {} is "{}" """.format(mbase, p, power1), " --> {} x {} = {}".format(power1, minum, minum * power1)) p = p + 1 milst.append(minum * power1) print("___________________________________________________") print() print("ADDITION OF INTEGRAL PART ===> ", end='') for i in range(milen): if (i + 1) < (milen): print(" {} +".format(milst[i]), end='') if i + 1 == milen: print("{} = ".format(milst[i]), end='') print(sum(milst)) print() print("___________________________________________________") print(" --------------------------------------------------") print("| DECIMAL PART |") print(" --------------------------------------------------") print() mbase = Decimal(mbase) for jj in range(mdlen): q = Decimal(pow(mbase, -(jj + 1))) print("{} power {} = {} ---> ".format(mbase, -(jj + 1), q)) # ,end='') print(" ", strmdnum[jj], " x ", q, " = ", q * int(strmdnum[jj])) mdlst.append(float(q * int(strmdnum[jj]))) print(" --------------------------------------------------") print(sum(mdlst)) print("___________________________________________________") print() print("ADDITION OF DECIMAL PART ===> ", end='') for i in range(mdlen): if (i + 1) < (mdlen): print(" {} +".format(mdlst[i]), end='') if i 
+ 1 == mdlen: print("{} = ".format(mdlst[i]), end='') print(sum(mdlst)) print("___________________________________________________") # print("---------------------------------------------------------------") print("SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = ".format(sum(milst), sum(mdlst)), sum(milst) + sum(mdlst)) print(" --------------------------------------------------") else: try: print(" --------------------------------------------------") print(" ---------------------") print(" | INVALID |") print(" ---------------------") print() print("all the digits should be less than the base ") print("The base of {} should not be {}".format(mnum, mbase)) print() main() except: pass def forBoth(): toTen() global count count = 1 fromTen() def main(): global num, base1, base2, count, fin count = 0 num = Decimal(input("Enter a number :")) base1 = int(input("Enter base of {} :".format(num))) base2 = int(input("Enter the base of resulting number:")) print(num) if base1 == 10: fromTen() elif base2 == 10: toTen() else: forBoth() s = 1 if s == 1: main() s = s + 1 while True: print("\n") condition = input("Do you want to continue ? (y/n):") if condition == "y": main() elif condition == "n": print() quit() else: print("Invalid input")
normal
{ "blob_id": "9cf32e127664cb4c3290e665e35245acc936e064", "index": 4090, "step-1": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\n<mask token>\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . 
(dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} = {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n print('___________________________________________________')\n print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + 
sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\n<mask token>\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n", "step-3": "<mask token>\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . (dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} 
= {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n print('___________________________________________________')\n print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n", "step-4": "from decimal import Decimal\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print('digits before . (dot) is {} '.format(Ipart))\n if strDpart == '0':\n print('digits after . (dot) is 0')\n else:\n print('digits after . 
(dot) is {}'.format(strDpart[2:]))\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n print(' {}|_{}'.format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(' {}|_'.format(base) + str(nnum) + ' --->{}'.format(srem)\n )\n else:\n print(' ' + str(nnum) + ' --->{}'.format(srem))\n Ilist.append(nnum)\n print(' --------------------------------------------------')\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n print(Ilist[::-1])\n print()\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print('{} x {} = '.format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n print(' --------------------------------------------------')\n print('integer part:')\n print(Ilist[::-1])\n print('decimal part:')\n print(Dlist)\n dot = ['.']\n y = Ilist[::-1]\n y1 = y + dot + Dlist\n for i in range(len(y1)):\n y1[i] = str(y1[i])\n print('Final Answer = ', '(', ''.join(y1), ')', 'base', base2)\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(' --------------------------------------------------')\n print('| INTEGRAL PART |')\n print(' --------------------------------------------------')\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print('{} power {} is \"{}\" '.format(mbase, p, power1),\n ' --> {} x {} = {}'.format(power1, minum, minum * power1)\n )\n p = p + 1\n milst.append(minum * power1)\n print('___________________________________________________')\n print()\n print('ADDITION OF INTEGRAL PART ===> ', end='')\n for i in range(milen):\n if i + 1 < milen:\n print(' {} +'.format(milst[i]), end='')\n if i + 1 == milen:\n print('{} = '.format(milst[i]), end='')\n print(sum(milst))\n print()\n print('___________________________________________________')\n print(' --------------------------------------------------')\n print('| DECIMAL PART |')\n print(' --------------------------------------------------')\n print()\n mbase = Decimal(mbase)\n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print('{} power {} = {} ---> '.format(mbase, -(jj + 1), q)\n )\n print(' ', strmdnum[jj], ' x ', q,\n ' = ', q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(' --------------------------------------------------')\n print(sum(mdlst))\n print('___________________________________________________')\n print()\n print('ADDITION OF DECIMAL PART ===> ', end='')\n for i in range(mdlen):\n if i + 1 < mdlen:\n print(' {} +'.format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print('{} = '.format(mdlst[i]), end='')\n print(sum(mdlst))\n print('___________________________________________________')\n print('SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = '.format(\n sum(milst), sum(mdlst)), sum(milst) + 
sum(mdlst))\n print(' --------------------------------------------------')\n else:\n try:\n print(' --------------------------------------------------')\n print(' ---------------------')\n print(' | INVALID |')\n print(' ---------------------')\n print()\n print('all the digits should be less than the base ')\n print('The base of {} should not be {}'.format(mnum, mbase))\n print()\n main()\n except:\n pass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n num = Decimal(input('Enter a number :'))\n base1 = int(input('Enter base of {} :'.format(num)))\n base2 = int(input('Enter the base of resulting number:'))\n print(num)\n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print('\\n')\n condition = input('Do you want to continue ? (y/n):')\n if condition == 'y':\n main()\n elif condition == 'n':\n print()\n quit()\n else:\n print('Invalid input')\n", "step-5": "# created by ahmad on 17-07-2019\n# last updated on 21-07-2019\n#recommended font size of console in pydroid is 12\n\nfrom decimal import Decimal\n\n\ndef fromTen():\n global fin\n fin = num\n nnum = num\n base = base2\n if count == 1:\n nnum = sum(milst) + sum(mdlst)\n \n Ipart = int(nnum)\n Dpart = Decimal(nnum - Ipart)\n strDpart = str(Dpart)\n Ilist = []\n Dlist = []\n print(\"digits before . (dot) is {} \".format(Ipart))\n if strDpart == \"0\":\n print(\"digits after . (dot) is 0\")\n else:\n print(\"digits after . (dot) is {}\".format(strDpart[2:])) \n print(\" --------------------------------------------------\")\n print(\"| INTEGRAL PART |\")\n print(\" --------------------------------------------------\")\n print(\" {}|_{}\".format(base, Ipart))\n while nnum >= base:\n rem = int(nnum % base)\n srem = str(rem)\n nnum = int(nnum / base)\n Ilist.append(rem)\n if nnum >= base:\n print(\" {}|_\".format(base) + str(nnum) + \" --->{}\".format(srem))\n else:\n print(\" \" + str(nnum) + \" --->{}\".format(srem))\n Ilist.append(nnum)\n print(\" --------------------------------------------------\")\n IIlist = Ilist\n for i in range(len(IIlist)):\n try:\n a = int(IIlist[i]) + 55\n if a > 64:\n IIlist[i] = chr(a)\n except:\n pass\n \n print(Ilist[::-1])\n print()\n print(\" --------------------------------------------------\")\n print(\"| DECIMAL PART |\")\n print(\" --------------------------------------------------\")\n k = 0\n while k < (len(strDpart) - 2) * 2:\n print(\"{} x {} = \".format(Dpart, base), end='')\n a = Dpart * base\n Dpart = a - int(a)\n print(a)\n a1 = int(a)\n Dlist.append(a1)\n k = k + 1\n\n print(\" --------------------------------------------------\")\n print(\"integer part:\")\n print(Ilist[::-1])\n print(\"decimal part:\")\n print(Dlist)\n dot = [\".\"]\n y=Ilist[::-1]\n y1=y+dot+ Dlist\n for i in range(len(y1)):\n \ty1[i]=str(y1[i])\n \n print(\"Final Answer = \",'(' ,''.join(y1),')','base',base2)\n\n\n\ndef toTen():\n mnum = num\n mbase = base1\n global fin\n mdnum = mnum - int(mnum)\n minum = int(mnum)\n\n strmdnum = str(mdnum)[2:]\n mdlen = len(strmdnum)\n\n strminum = str(minum)[::-1]\n milen = len(strminum)\n strnum = strmdnum + strminum\n con = 0\n for i in range(len(strnum)):\n a = int(strnum[i])\n if a >= mbase:\n con = con + 1\n if con == 0:\n p = 0\n global milst, mdlst\n milst = []\n mdlst = []\n print(\" --------------------------------------------------\")\n print(\"| INTEGRAL PART |\")\n print(\" 
--------------------------------------------------\")\n for ii in range(milen):\n minum = int(strminum[ii])\n power1 = pow(mbase, p)\n print(\"\"\"{} power {} is \"{}\" \"\"\".format(mbase, p, power1),\n \" --> {} x {} = {}\".format(power1, minum, minum * power1))\n p = p + 1\n milst.append(minum * power1)\n print(\"___________________________________________________\")\n print()\n print(\"ADDITION OF INTEGRAL PART ===> \", end='')\n for i in range(milen):\n if (i + 1) < (milen):\n print(\" {} +\".format(milst[i]), end='')\n if i + 1 == milen:\n print(\"{} = \".format(milst[i]), end='')\n print(sum(milst))\n print()\n print(\"___________________________________________________\")\n\n print(\" --------------------------------------------------\")\n print(\"| DECIMAL PART |\")\n print(\" --------------------------------------------------\")\n print()\n mbase = Decimal(mbase)\n \n for jj in range(mdlen):\n q = Decimal(pow(mbase, -(jj + 1)))\n print(\"{} power {} = {} ---> \".format(mbase, -(jj + 1), q)) # ,end='')\n print(\" \", strmdnum[jj], \" x \", q, \" = \", q * int(strmdnum[jj]))\n mdlst.append(float(q * int(strmdnum[jj])))\n print(\" --------------------------------------------------\")\n print(sum(mdlst))\n print(\"___________________________________________________\")\n print()\n print(\"ADDITION OF DECIMAL PART ===> \", end='')\n for i in range(mdlen):\n if (i + 1) < (mdlen):\n print(\" {} +\".format(mdlst[i]), end='')\n if i + 1 == mdlen:\n print(\"{} = \".format(mdlst[i]), end='')\n print(sum(mdlst))\n print(\"___________________________________________________\")\n # print(\"---------------------------------------------------------------\")\n print(\"SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = \".format(sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))\n print(\" --------------------------------------------------\")\n else:\n\n \ttry:\n \tprint(\" --------------------------------------------------\")\n \tprint(\" ---------------------\")\n \tprint(\" | INVALID |\")\n \tprint(\" ---------------------\")\n \tprint()\n \tprint(\"all the digits should be less than the base \")\n \tprint(\"The base of {} should not be {}\".format(mnum, mbase))\n \tprint()\n \tmain()\n \texcept:\n \tpass\n\n\ndef forBoth():\n toTen()\n global count\n count = 1\n fromTen()\n\n\ndef main():\n global num, base1, base2, count, fin\n count = 0\n \n num = Decimal(input(\"Enter a number :\"))\n base1 = int(input(\"Enter base of {} :\".format(num)))\n base2 = int(input(\"Enter the base of resulting number:\"))\n print(num)\n \n if base1 == 10:\n fromTen()\n elif base2 == 10:\n toTen()\n else:\n forBoth()\n\n\ns = 1\nif s == 1:\n main()\n s = s + 1\nwhile True:\n print(\"\\n\")\n condition = input(\"Do you want to continue ? (y/n):\")\n if condition == \"y\":\n main()\n elif condition == \"n\":\n print()\n \n quit()\n else:\n print(\"Invalid input\")\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
import sys, os def carp(): sys.stderr = sys.stdin print "content-type: text/plain" print #carp() import sesspool import cornerhost.config ## set up session pool = sesspool.SessPool("sess/sessions.db") SESS = sesspool.Sess(pool, REQ, RES) SESS.start() ENG.do_on_exit(SESS.stop) CLERK = cornerhost.config.makeClerk()
normal
{ "blob_id": "adae1d7cc2a866c9bc3cd21cb54a0191389f8083", "index": 3914, "step-1": "import sys, os\ndef carp():\n sys.stderr = sys.stdin\n print \"content-type: text/plain\"\n print \n#carp()\n\nimport sesspool\nimport cornerhost.config\n\n\n## set up session\npool = sesspool.SessPool(\"sess/sessions.db\")\nSESS = sesspool.Sess(pool, REQ, RES)\nSESS.start()\nENG.do_on_exit(SESS.stop)\n\n\nCLERK = cornerhost.config.makeClerk()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import json from tqdm import tqdm from topic.topic import get_topic_scores, get_topic_similarity user_weights = json.load(open('data/selected_user_weights.json', 'r', encoding='utf8')) reviews = json.load(open('data/business_reviews_test.json', 'r', encoding='utf8')) for business, business_reviews in reviews.items(): for target_user in user_weights: if target_user in business_reviews: target_stars = business_reviews[target_user]['stars'] star_sum = 0 weighted_star_sum = 0 weight_sum = 0 num_scores = 0 for user, review in business_reviews.items(): if user != target_user: text = review['text'] stars = review['stars'] # topic_scores = get_topic_scores(text) # weight = 0 # for i, score in topic_scores: # weight += score * user_weights[target_user][i] weight = get_topic_similarity(user_weights[target_user], text) weighted_star_sum += stars * weight weight_sum += weight star_sum += stars num_scores += 1 predicted_stars = weighted_star_sum / weight_sum average = star_sum / num_scores
normal
{ "blob_id": "be90447eb7c717ae0bae28fd7f10238be733648d", "index": 3617, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor business, business_reviews in reviews.items():\n for target_user in user_weights:\n if target_user in business_reviews:\n target_stars = business_reviews[target_user]['stars']\n star_sum = 0\n weighted_star_sum = 0\n weight_sum = 0\n num_scores = 0\n for user, review in business_reviews.items():\n if user != target_user:\n text = review['text']\n stars = review['stars']\n weight = get_topic_similarity(user_weights[target_user],\n text)\n weighted_star_sum += stars * weight\n weight_sum += weight\n star_sum += stars\n num_scores += 1\n predicted_stars = weighted_star_sum / weight_sum\n average = star_sum / num_scores\n", "step-3": "<mask token>\nuser_weights = json.load(open('data/selected_user_weights.json', 'r',\n encoding='utf8'))\nreviews = json.load(open('data/business_reviews_test.json', 'r', encoding=\n 'utf8'))\nfor business, business_reviews in reviews.items():\n for target_user in user_weights:\n if target_user in business_reviews:\n target_stars = business_reviews[target_user]['stars']\n star_sum = 0\n weighted_star_sum = 0\n weight_sum = 0\n num_scores = 0\n for user, review in business_reviews.items():\n if user != target_user:\n text = review['text']\n stars = review['stars']\n weight = get_topic_similarity(user_weights[target_user],\n text)\n weighted_star_sum += stars * weight\n weight_sum += weight\n star_sum += stars\n num_scores += 1\n predicted_stars = weighted_star_sum / weight_sum\n average = star_sum / num_scores\n", "step-4": "import json\nfrom tqdm import tqdm\nfrom topic.topic import get_topic_scores, get_topic_similarity\nuser_weights = json.load(open('data/selected_user_weights.json', 'r',\n encoding='utf8'))\nreviews = json.load(open('data/business_reviews_test.json', 'r', encoding=\n 'utf8'))\nfor business, business_reviews in reviews.items():\n for target_user in user_weights:\n if target_user in business_reviews:\n target_stars = business_reviews[target_user]['stars']\n star_sum = 0\n weighted_star_sum = 0\n weight_sum = 0\n num_scores = 0\n for user, review in business_reviews.items():\n if user != target_user:\n text = review['text']\n stars = review['stars']\n weight = get_topic_similarity(user_weights[target_user],\n text)\n weighted_star_sum += stars * weight\n weight_sum += weight\n star_sum += stars\n num_scores += 1\n predicted_stars = weighted_star_sum / weight_sum\n average = star_sum / num_scores\n", "step-5": "import json\n\nfrom tqdm import tqdm\n\nfrom topic.topic import get_topic_scores, get_topic_similarity\n\nuser_weights = json.load(open('data/selected_user_weights.json', 'r', encoding='utf8'))\nreviews = json.load(open('data/business_reviews_test.json', 'r', encoding='utf8'))\n\nfor business, business_reviews in reviews.items():\n for target_user in user_weights:\n if target_user in business_reviews:\n target_stars = business_reviews[target_user]['stars']\n star_sum = 0\n weighted_star_sum = 0\n weight_sum = 0\n num_scores = 0\n for user, review in business_reviews.items():\n if user != target_user:\n text = review['text']\n stars = review['stars']\n # topic_scores = get_topic_scores(text)\n # weight = 0\n # for i, score in topic_scores:\n # weight += score * user_weights[target_user][i]\n weight = get_topic_similarity(user_weights[target_user], text)\n weighted_star_sum += stars * weight\n weight_sum += weight\n star_sum += stars\n num_scores += 1\n predicted_stars = weighted_star_sum / weight_sum\n average = 
star_sum / num_scores\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> data.drop_duplicates(subset='ip', inplace=True, keep='first') data.reset_index(drop=True, inplace=True) <|reserved_special_token_0|> cols.extend(sites) <|reserved_special_token_0|> attributes.set_index('userID', inplace=True) for col in cols: if col == 'userID': continue attributes[col] = 0 <|reserved_special_token_0|> for i, row in data.iterrows(): vp = row.sites for site in vp: if site in sites: attributes.iloc[i][site] = 1 attributes.to_csv('user_attributes.arff', header=False) <|reserved_special_token_0|> for site in sites: header += '@ATTRIBUTE ' + site + ' {0, 1}\n' header += '\n\n@DATA\n' with open('user_attributes.arff', 'r+') as f: content = f.read() f.seek(0, 0) f.write(header.rstrip('\r') + '\n' + content) <|reserved_special_token_1|> <|reserved_special_token_0|> INPUT_FILE = 'data_full.csv' data = pd.read_csv(INPUT_FILE) data['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S') data = data[['ip', 'address']] data['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x)) data = data[['ip', 'sites']] data['sites'] = data['sites'].apply(lambda x: x.split(',')) data.drop_duplicates(subset='ip', inplace=True, keep='first') data.reset_index(drop=True, inplace=True) INPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv' sites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]). values.tolist()).flatten() cols = ['userID'] cols.extend(sites) attributes = pd.DataFrame(columns=cols) attributes['userID'] = data.ip attributes.set_index('userID', inplace=True) for col in cols: if col == 'userID': continue attributes[col] = 0 len_sites = len(sites) for i, row in data.iterrows(): vp = row.sites for site in vp: if site in sites: attributes.iloc[i][site] = 1 attributes.to_csv('user_attributes.arff', header=False) header = '@RELATION user_attributes.arff\n\n' + '@ATTRIBUTE userID STRING\n' for site in sites: header += '@ATTRIBUTE ' + site + ' {0, 1}\n' header += '\n\n@DATA\n' with open('user_attributes.arff', 'r+') as f: content = f.read() f.seek(0, 0) f.write(header.rstrip('\r') + '\n' + content) <|reserved_special_token_1|> import numpy as np import pandas as pd INPUT_FILE = 'data_full.csv' data = pd.read_csv(INPUT_FILE) data['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S') data = data[['ip', 'address']] data['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x)) data = data[['ip', 'sites']] data['sites'] = data['sites'].apply(lambda x: x.split(',')) data.drop_duplicates(subset='ip', inplace=True, keep='first') data.reset_index(drop=True, inplace=True) INPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv' sites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]). 
values.tolist()).flatten() cols = ['userID'] cols.extend(sites) attributes = pd.DataFrame(columns=cols) attributes['userID'] = data.ip attributes.set_index('userID', inplace=True) for col in cols: if col == 'userID': continue attributes[col] = 0 len_sites = len(sites) for i, row in data.iterrows(): vp = row.sites for site in vp: if site in sites: attributes.iloc[i][site] = 1 attributes.to_csv('user_attributes.arff', header=False) header = '@RELATION user_attributes.arff\n\n' + '@ATTRIBUTE userID STRING\n' for site in sites: header += '@ATTRIBUTE ' + site + ' {0, 1}\n' header += '\n\n@DATA\n' with open('user_attributes.arff', 'r+') as f: content = f.read() f.seek(0, 0) f.write(header.rstrip('\r') + '\n' + content) <|reserved_special_token_1|> import numpy as np import pandas as pd INPUT_FILE = 'data_full.csv' data = pd.read_csv(INPUT_FILE) data['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S') # Wyodrębnienie użytkowników i stron na jakie wchodzili do postaci <USER> [<SITES>] data = data[['ip', 'address']] data['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x)) data = data[['ip', 'sites']] data['sites'] = data['sites'].apply(lambda x: x.split(',')) data.drop_duplicates(subset='ip', inplace=True, keep='first') data.reset_index(drop=True, inplace=True) # Analiza koszykowa INPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv' sites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]).values.tolist()).flatten() cols = ['userID'] cols.extend(sites) attributes = pd.DataFrame(columns=cols) attributes['userID'] = data.ip attributes.set_index('userID', inplace=True) # Transformacja koszykowa for col in cols: if col == 'userID': continue attributes[col] = 0 len_sites = len(sites) for i, row in data.iterrows(): vp = row.sites for site in vp: if site in sites: attributes.iloc[i][site] = 1 attributes.to_csv('user_attributes.arff', header=False) header = '@RELATION user_attributes.arff\n\n' + \ '@ATTRIBUTE userID STRING\n' for site in sites: header += '@ATTRIBUTE ' + site + ' {0, 1}\n' header += '\n\n@DATA\n' with open('user_attributes.arff', 'r+') as f: content = f.read() f.seek(0, 0) f.write(header.rstrip('\r') + '\n' + content)
flexible
{ "blob_id": "3b61d389eda85ddb4c96f93c977a33b91da579ce", "index": 7900, "step-1": "<mask token>\n", "step-2": "<mask token>\ndata.drop_duplicates(subset='ip', inplace=True, keep='first')\ndata.reset_index(drop=True, inplace=True)\n<mask token>\ncols.extend(sites)\n<mask token>\nattributes.set_index('userID', inplace=True)\nfor col in cols:\n if col == 'userID':\n continue\n attributes[col] = 0\n<mask token>\nfor i, row in data.iterrows():\n vp = row.sites\n for site in vp:\n if site in sites:\n attributes.iloc[i][site] = 1\nattributes.to_csv('user_attributes.arff', header=False)\n<mask token>\nfor site in sites:\n header += '@ATTRIBUTE ' + site + ' {0, 1}\\n'\nheader += '\\n\\n@DATA\\n'\nwith open('user_attributes.arff', 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(header.rstrip('\\r') + '\\n' + content)\n", "step-3": "<mask token>\nINPUT_FILE = 'data_full.csv'\ndata = pd.read_csv(INPUT_FILE)\ndata['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S')\ndata = data[['ip', 'address']]\ndata['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x))\ndata = data[['ip', 'sites']]\ndata['sites'] = data['sites'].apply(lambda x: x.split(','))\ndata.drop_duplicates(subset='ip', inplace=True, keep='first')\ndata.reset_index(drop=True, inplace=True)\nINPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv'\nsites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]).\n values.tolist()).flatten()\ncols = ['userID']\ncols.extend(sites)\nattributes = pd.DataFrame(columns=cols)\nattributes['userID'] = data.ip\nattributes.set_index('userID', inplace=True)\nfor col in cols:\n if col == 'userID':\n continue\n attributes[col] = 0\nlen_sites = len(sites)\nfor i, row in data.iterrows():\n vp = row.sites\n for site in vp:\n if site in sites:\n attributes.iloc[i][site] = 1\nattributes.to_csv('user_attributes.arff', header=False)\nheader = '@RELATION user_attributes.arff\\n\\n' + '@ATTRIBUTE userID STRING\\n'\nfor site in sites:\n header += '@ATTRIBUTE ' + site + ' {0, 1}\\n'\nheader += '\\n\\n@DATA\\n'\nwith open('user_attributes.arff', 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(header.rstrip('\\r') + '\\n' + content)\n", "step-4": "import numpy as np\nimport pandas as pd\nINPUT_FILE = 'data_full.csv'\ndata = pd.read_csv(INPUT_FILE)\ndata['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S')\ndata = data[['ip', 'address']]\ndata['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x))\ndata = data[['ip', 'sites']]\ndata['sites'] = data['sites'].apply(lambda x: x.split(','))\ndata.drop_duplicates(subset='ip', inplace=True, keep='first')\ndata.reset_index(drop=True, inplace=True)\nINPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv'\nsites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]).\n values.tolist()).flatten()\ncols = ['userID']\ncols.extend(sites)\nattributes = pd.DataFrame(columns=cols)\nattributes['userID'] = data.ip\nattributes.set_index('userID', inplace=True)\nfor col in cols:\n if col == 'userID':\n continue\n attributes[col] = 0\nlen_sites = len(sites)\nfor i, row in data.iterrows():\n vp = row.sites\n for site in vp:\n if site in sites:\n attributes.iloc[i][site] = 1\nattributes.to_csv('user_attributes.arff', header=False)\nheader = '@RELATION user_attributes.arff\\n\\n' + '@ATTRIBUTE userID STRING\\n'\nfor site in sites:\n header += '@ATTRIBUTE ' + site + ' {0, 1}\\n'\nheader += '\\n\\n@DATA\\n'\nwith open('user_attributes.arff', 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n 
f.write(header.rstrip('\\r') + '\\n' + content)\n", "step-5": "import numpy as np\nimport pandas as pd\n\nINPUT_FILE = 'data_full.csv'\ndata = pd.read_csv(INPUT_FILE)\ndata['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S')\n\n# Wyodrębnienie użytkowników i stron na jakie wchodzili do postaci <USER> [<SITES>]\ndata = data[['ip', 'address']]\ndata['sites'] = data.groupby(['ip']).transform(lambda x: ','.join(x))\ndata = data[['ip', 'sites']]\ndata['sites'] = data['sites'].apply(lambda x: x.split(','))\ndata.drop_duplicates(subset='ip', inplace=True, keep='first')\ndata.reset_index(drop=True, inplace=True)\n\n# Analiza koszykowa\nINPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv'\nsites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]).values.tolist()).flatten()\n\ncols = ['userID']\ncols.extend(sites)\nattributes = pd.DataFrame(columns=cols)\nattributes['userID'] = data.ip\nattributes.set_index('userID', inplace=True)\n\n# Transformacja koszykowa\nfor col in cols:\n if col == 'userID':\n continue\n attributes[col] = 0\n\nlen_sites = len(sites)\nfor i, row in data.iterrows():\n vp = row.sites\n for site in vp:\n if site in sites:\n attributes.iloc[i][site] = 1\n\nattributes.to_csv('user_attributes.arff', header=False)\n\nheader = '@RELATION user_attributes.arff\\n\\n' + \\\n '@ATTRIBUTE userID STRING\\n'\n\nfor site in sites:\n header += '@ATTRIBUTE ' + site + ' {0, 1}\\n'\n\nheader += '\\n\\n@DATA\\n'\n\nwith open('user_attributes.arff', 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(header.rstrip('\\r') + '\\n' + content)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from app import app from flask import request @app.route('/') @app.route('/index') def index(): return 'Hello world' @app.route('/api_post', methods=['POST']) def postJsonHandler(): print(request.is_json) content = request.get_json() print(content) return 'JSON posted'
normal
{ "blob_id": "9d8c4bf9f9279d5e30d0e9742cdd31713e5f4b9e", "index": 2104, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\[email protected]('/api_post', methods=['POST'])\ndef postJsonHandler():\n print(request.is_json)\n content = request.get_json()\n print(content)\n return 'JSON posted'\n", "step-4": "from app import app\nfrom flask import request\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\[email protected]('/api_post', methods=['POST'])\ndef postJsonHandler():\n print(request.is_json)\n content = request.get_json()\n print(content)\n return 'JSON posted'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python import sys def solve(n, k): wrap = 2 ** n snaps_that_matter = k % wrap return snaps_that_matter == wrap - 1 def main(): lines = sys.stdin.readlines() T = int(lines[0]) for i, line in enumerate(lines[1:]): N, K = line.split(' ') on = solve(int(N), int(K)) str_on = 'OFF' if on: str_on = 'ON' print 'Case #%d: %s' % (i+1, str_on) if __name__ == '__main__': main()
normal
{ "blob_id": "1803f634c8e833f4a92ae35bcfafb04dfd1d2305", "index": 7661, "step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef solve(n, k):\n wrap = 2 ** n\n snaps_that_matter = k % wrap\n return snaps_that_matter == wrap - 1\n\ndef main():\n lines = sys.stdin.readlines()\n T = int(lines[0])\n \n for i, line in enumerate(lines[1:]):\n N, K = line.split(' ')\n on = solve(int(N), int(K))\n str_on = 'OFF'\n if on:\n str_on = 'ON'\n print 'Case #%d: %s' % (i+1, str_on)\n\nif __name__ == '__main__': main()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> @app.route('/login', methods=['POST']) def login() ->dict: db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) data: dict = request.get_json() query: str = ( 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?' ) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Docente' session['DocenteDNI'] = rows[0].DocenteDNI session['Nombre'] = rows[0].Nombre session['Apellido'] = rows[0].Apellido session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200) else: query: str = ( 'select Usuario,Contrasena from Administrador where Usuario=? and Contrasena=?' ) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Administrador' session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200 ) else: db_cursor.close() db_connection.close() return make_response('pos a lo mejor se equivoco?', 401) @app.route('/teacher_fullname', methods=['GET']) def teacherFullname() ->dict: if 'account_type' not in session: return make_response('pa que quieres saber eso jaja salu2', 401) elif session['account_type'] == 'Docente': return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']} elif session['account_type'] == 'Administrador': return make_response('wey no!!!', 400) @app.route('/time', methods=['GET']) def time() ->dict: if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() return {'date': current_datetime.strftime('%d/%m/%Y'), 'time': current_datetime.strftime('%H,%M,%S')} @app.route('/teacher_course_list', methods=['GET']) def teacherCourseList() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor() db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'), session['DocenteDNI'])) today_assigned_courses: list = db_cursor.fetchall() today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), today_assigned_courses) if len(today_assigned_courses) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for course in today_assigned_courses: db_cursor.execute(existence_check_query, (current_datetime. 
strftime('%Y/%m/%d'), course['AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: course['state'] = 'marked' elif current_datetime >= scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime): if current_datetime - scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime ) <= teacher_time_tolerance: course['state'] = 'mark_now' else: course['state'] = 'not_marked' else: course['state'] = 'waiting' db_cursor.close() db_connection.close() return jsonify(today_assigned_courses) elif session['account_type'] == 'Administrador': return make_response('ya nos jakiaron', 400) <|reserved_special_token_0|> @app.route('/admin_add_teacher', methods=['POST']) def adminAddTeacher() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': data = request.get_json() db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'insert into Docente() values(?,?,?,?,?)' db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[ 'Apellido'], data['Usuario'], data['Contrasena'])) db_cursor.close() db_connection.close() return make_response('se agrego la entrada', 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_teacher_table', methods=['GET']) def adminGetTeacherTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Docente' db_cursor.execute(query) teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(teacher_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_table', methods=['GET']) def adminGetCourseTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Curso' db_cursor.execute(query) course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio', 'FechaFin'), db_cursor.fetchall()) for course in course_table: course['FechaInicio'] = course['FechaInicio'].isoformat() course['FechaFin'] = course['FechaFin'].isoformat() db_cursor.close() db_connection.close() return make_response(jsonify(course_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_classroom_table', methods=['GET']) def adminGetClassroomTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select Pabellon,Numero from Salon' db_cursor.execute(query) classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(classroom_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_assignment_table', methods=['GET']) def adminGetCourseAssignmentTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ( 'select d.DocenteDNI, d.Nombre, 
d.Apellido,a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)' ) db_cursor.execute(query) course_assignment_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero', 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(course_assignment_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/logout', methods=['DELETE']) def logout() ->dict: if 'account_type' not in session: return make_response('primero inicia session broz', 301) elif session['account_type'] == 'Docente': session.pop('Usuario') session.pop('Nombre') session.pop('Apellido') return make_response('hasta luego prosor', 200) elif session['account_type'] == 'Administrador': session.pop('Usuario') return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/login', methods=['POST']) def login() ->dict: db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) data: dict = request.get_json() query: str = ( 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?' ) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Docente' session['DocenteDNI'] = rows[0].DocenteDNI session['Nombre'] = rows[0].Nombre session['Apellido'] = rows[0].Apellido session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200) else: query: str = ( 'select Usuario,Contrasena from Administrador where Usuario=? and Contrasena=?' 
) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Administrador' session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200 ) else: db_cursor.close() db_connection.close() return make_response('pos a lo mejor se equivoco?', 401) @app.route('/teacher_fullname', methods=['GET']) def teacherFullname() ->dict: if 'account_type' not in session: return make_response('pa que quieres saber eso jaja salu2', 401) elif session['account_type'] == 'Docente': return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']} elif session['account_type'] == 'Administrador': return make_response('wey no!!!', 400) @app.route('/time', methods=['GET']) def time() ->dict: if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() return {'date': current_datetime.strftime('%d/%m/%Y'), 'time': current_datetime.strftime('%H,%M,%S')} @app.route('/teacher_course_list', methods=['GET']) def teacherCourseList() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor() db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'), session['DocenteDNI'])) today_assigned_courses: list = db_cursor.fetchall() today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), today_assigned_courses) if len(today_assigned_courses) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for course in today_assigned_courses: db_cursor.execute(existence_check_query, (current_datetime. 
strftime('%Y/%m/%d'), course['AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: course['state'] = 'marked' elif current_datetime >= scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime): if current_datetime - scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime ) <= teacher_time_tolerance: course['state'] = 'mark_now' else: course['state'] = 'not_marked' else: course['state'] = 'waiting' db_cursor.close() db_connection.close() return jsonify(today_assigned_courses) elif session['account_type'] == 'Administrador': return make_response('ya nos jakiaron', 400) <|reserved_special_token_0|> @app.route('/admin_get_report', methods=['GET']) def adminGetReport() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Administrador': time_range = request.get_json()['time_range'] if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") report: list if time_range == 'today': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'), current_datetime.strftime('%H:%M:%S'))) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'yesterday': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)' ) current_datetime -= datetime.timedelta(days=1) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),)) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' 
) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'this_week': pass elif time_range == 'this_month': pass elif time_range == 'all': pass else: return make_response('peticion invalida', 406) elif session['account_type'] == 'Docente': return make_response('ya nos jakiaron', 400) @app.route('/admin_add_teacher', methods=['POST']) def adminAddTeacher() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': data = request.get_json() db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'insert into Docente() values(?,?,?,?,?)' db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[ 'Apellido'], data['Usuario'], data['Contrasena'])) db_cursor.close() db_connection.close() return make_response('se agrego la entrada', 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_teacher_table', methods=['GET']) def adminGetTeacherTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Docente' db_cursor.execute(query) teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(teacher_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_table', methods=['GET']) def adminGetCourseTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Curso' db_cursor.execute(query) course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio', 'FechaFin'), db_cursor.fetchall()) for course in course_table: course['FechaInicio'] = course['FechaInicio'].isoformat() course['FechaFin'] = course['FechaFin'].isoformat() db_cursor.close() db_connection.close() return make_response(jsonify(course_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_classroom_table', methods=['GET']) def adminGetClassroomTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select Pabellon,Numero from Salon' db_cursor.execute(query) classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(classroom_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_assignment_table', methods=['GET']) def adminGetCourseAssignmentTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ( 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, 
s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)' ) db_cursor.execute(query) course_assignment_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero', 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(course_assignment_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/logout', methods=['DELETE']) def logout() ->dict: if 'account_type' not in session: return make_response('primero inicia session broz', 301) elif session['account_type'] == 'Docente': session.pop('Usuario') session.pop('Nombre') session.pop('Apellido') return make_response('hasta luego prosor', 200) elif session['account_type'] == 'Administrador': session.pop('Usuario') return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/login', methods=['POST']) def login() ->dict: db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) data: dict = request.get_json() query: str = ( 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?' ) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Docente' session['DocenteDNI'] = rows[0].DocenteDNI session['Nombre'] = rows[0].Nombre session['Apellido'] = rows[0].Apellido session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200) else: query: str = ( 'select Usuario,Contrasena from Administrador where Usuario=? and Contrasena=?' 
) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Administrador' session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200 ) else: db_cursor.close() db_connection.close() return make_response('pos a lo mejor se equivoco?', 401) @app.route('/teacher_fullname', methods=['GET']) def teacherFullname() ->dict: if 'account_type' not in session: return make_response('pa que quieres saber eso jaja salu2', 401) elif session['account_type'] == 'Docente': return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']} elif session['account_type'] == 'Administrador': return make_response('wey no!!!', 400) @app.route('/time', methods=['GET']) def time() ->dict: if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() return {'date': current_datetime.strftime('%d/%m/%Y'), 'time': current_datetime.strftime('%H,%M,%S')} @app.route('/teacher_course_list', methods=['GET']) def teacherCourseList() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor() db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'), session['DocenteDNI'])) today_assigned_courses: list = db_cursor.fetchall() today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), today_assigned_courses) if len(today_assigned_courses) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for course in today_assigned_courses: db_cursor.execute(existence_check_query, (current_datetime. strftime('%Y/%m/%d'), course['AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: course['state'] = 'marked' elif current_datetime >= scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime): if current_datetime - scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime ) <= teacher_time_tolerance: course['state'] = 'mark_now' else: course['state'] = 'not_marked' else: course['state'] = 'waiting' db_cursor.close() db_connection.close() return jsonify(today_assigned_courses) elif session['account_type'] == 'Administrador': return make_response('ya nos jakiaron', 400) @app.route('/teacher_mark', methods=['POST']) def teacherMark() ->dict: if 'account_type' not in session: return make_response('stap', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() course_to_mark: dict db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID,SalonID from AsignacionCurso where DocenteDNI=? and Dia=dayname(?) and HoraInicio <=? and timediff(?,HoraInicio)<=?;' ) db_cursor.execute(query, (session['DocenteDNI'], current_datetime. 
strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'), current_datetime.strftime('%H:%M:%S'), str(teacher_time_tolerance)) ) course_to_mark = db_cursor.fetchall() if len(course_to_mark) == 1: insertion_query: str = 'insert into Marcacion() values(?,?,?,?);' db_cursor.execute(insertion_query, (int(course_to_mark[0]. AsignacionCursoID), current_datetime.strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'), int(course_to_mark[0 ].SalonID))) db_cursor.close() db_connection.close() return make_response('se marco la asistencia', 200) else: db_cursor.close() db_connection.close() return make_response('ya es tarde', 406) elif session['account_type'] == 'Administrador': return make_response( 'papu, si ya nos jakiaste por lo menos usa los servicios correctos no?' , 400) @app.route('/admin_get_report', methods=['GET']) def adminGetReport() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Administrador': time_range = request.get_json()['time_range'] if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") report: list if time_range == 'today': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'), current_datetime.strftime('%H:%M:%S'))) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'yesterday': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)' ) current_datetime -= datetime.timedelta(days=1) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),)) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' 
) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'this_week': pass elif time_range == 'this_month': pass elif time_range == 'all': pass else: return make_response('peticion invalida', 406) elif session['account_type'] == 'Docente': return make_response('ya nos jakiaron', 400) @app.route('/admin_add_teacher', methods=['POST']) def adminAddTeacher() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': data = request.get_json() db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'insert into Docente() values(?,?,?,?,?)' db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[ 'Apellido'], data['Usuario'], data['Contrasena'])) db_cursor.close() db_connection.close() return make_response('se agrego la entrada', 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_teacher_table', methods=['GET']) def adminGetTeacherTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Docente' db_cursor.execute(query) teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(teacher_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_table', methods=['GET']) def adminGetCourseTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Curso' db_cursor.execute(query) course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio', 'FechaFin'), db_cursor.fetchall()) for course in course_table: course['FechaInicio'] = course['FechaInicio'].isoformat() course['FechaFin'] = course['FechaFin'].isoformat() db_cursor.close() db_connection.close() return make_response(jsonify(course_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_classroom_table', methods=['GET']) def adminGetClassroomTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select Pabellon,Numero from Salon' db_cursor.execute(query) classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(classroom_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_assignment_table', methods=['GET']) def adminGetCourseAssignmentTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ( 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, 
s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)' ) db_cursor.execute(query) course_assignment_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero', 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(course_assignment_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/logout', methods=['DELETE']) def logout() ->dict: if 'account_type' not in session: return make_response('primero inicia session broz', 301) elif session['account_type'] == 'Docente': session.pop('Usuario') session.pop('Nombre') session.pop('Apellido') return make_response('hasta luego prosor', 200) elif session['account_type'] == 'Administrador': session.pop('Usuario') return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) <|reserved_special_token_1|> <|reserved_special_token_0|> testing: bool = True if testing: fake_datetime = datetime.datetime(2020, 8, 7, 15, 10) app = Flask(__name__) app.config['SECRET_KEY'] = 'clave ultra secreta' app.permanent_session_lifetime = datetime.timedelta(minutes=20) teacher_time_tolerance = datetime.timedelta(minutes=20) db = mariadb.ConnectionPool(user='brocolio', password='brocolio', host= 'localhost', pool_name='pul', pool_size=20, database='scad') spanish_days: dict = {'Monday': 'lunes', 'Tuesday': 'martes', 'Wednesday': 'miércoles', 'Thursday': 'jueves', 'Friday': 'viernes', 'Saturday': 'sábado', 'Sunday': 'domingo'} json.JSONEncoder.default = lambda self, obj: obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else str(obj) @app.route('/login', methods=['POST']) def login() ->dict: db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) data: dict = request.get_json() query: str = ( 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?' ) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Docente' session['DocenteDNI'] = rows[0].DocenteDNI session['Nombre'] = rows[0].Nombre session['Apellido'] = rows[0].Apellido session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200) else: query: str = ( 'select Usuario,Contrasena from Administrador where Usuario=? and Contrasena=?' 
) db_cursor.execute(query, (data['Usuario'], data['Contrasena'])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session['account_type'] = 'Administrador' session['Usuario'] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({'account_type': session['account_type']}, 200 ) else: db_cursor.close() db_connection.close() return make_response('pos a lo mejor se equivoco?', 401) @app.route('/teacher_fullname', methods=['GET']) def teacherFullname() ->dict: if 'account_type' not in session: return make_response('pa que quieres saber eso jaja salu2', 401) elif session['account_type'] == 'Docente': return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']} elif session['account_type'] == 'Administrador': return make_response('wey no!!!', 400) @app.route('/time', methods=['GET']) def time() ->dict: if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() return {'date': current_datetime.strftime('%d/%m/%Y'), 'time': current_datetime.strftime('%H,%M,%S')} @app.route('/teacher_course_list', methods=['GET']) def teacherCourseList() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor() db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'), session['DocenteDNI'])) today_assigned_courses: list = db_cursor.fetchall() today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), today_assigned_courses) if len(today_assigned_courses) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for course in today_assigned_courses: db_cursor.execute(existence_check_query, (current_datetime. strftime('%Y/%m/%d'), course['AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: course['state'] = 'marked' elif current_datetime >= scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime): if current_datetime - scad_utils.timeToDatetime(course[ 'HoraInicio'], current_datetime ) <= teacher_time_tolerance: course['state'] = 'mark_now' else: course['state'] = 'not_marked' else: course['state'] = 'waiting' db_cursor.close() db_connection.close() return jsonify(today_assigned_courses) elif session['account_type'] == 'Administrador': return make_response('ya nos jakiaron', 400) @app.route('/teacher_mark', methods=['POST']) def teacherMark() ->dict: if 'account_type' not in session: return make_response('stap', 401) elif session['account_type'] == 'Docente': if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() course_to_mark: dict db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( 'select AsignacionCursoID,SalonID from AsignacionCurso where DocenteDNI=? and Dia=dayname(?) and HoraInicio <=? and timediff(?,HoraInicio)<=?;' ) db_cursor.execute(query, (session['DocenteDNI'], current_datetime. 
strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'), current_datetime.strftime('%H:%M:%S'), str(teacher_time_tolerance)) ) course_to_mark = db_cursor.fetchall() if len(course_to_mark) == 1: insertion_query: str = 'insert into Marcacion() values(?,?,?,?);' db_cursor.execute(insertion_query, (int(course_to_mark[0]. AsignacionCursoID), current_datetime.strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'), int(course_to_mark[0 ].SalonID))) db_cursor.close() db_connection.close() return make_response('se marco la asistencia', 200) else: db_cursor.close() db_connection.close() return make_response('ya es tarde', 406) elif session['account_type'] == 'Administrador': return make_response( 'papu, si ya nos jakiaste por lo menos usa los servicios correctos no?' , 400) @app.route('/admin_get_report', methods=['GET']) def adminGetReport() ->list: if 'account_type' not in session: return make_response('nope', 401) elif session['account_type'] == 'Administrador': time_range = request.get_json()['time_range'] if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") report: list if time_range == 'today': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? ' ) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'), current_datetime.strftime('%H:%M:%S'))) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' ) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'yesterday': query: str = ( 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)' ) current_datetime -= datetime.timedelta(days=1) db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),)) report = db_cursor.fetchall() report = scad_utils.rowToDict(('AsignacionCursoID', 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report) if len(report) > 0: existence_check_query: str = ( 'select * from Marcacion where Fecha=? and AsignacionCursoID=?' 
) for assignment in report: db_cursor.execute(existence_check_query, ( current_datetime.strftime('%Y-%m-%d'), assignment[ 'AsignacionCursoID'])) if len(db_cursor.fetchall()) > 0: assignment['state'] = 'marked' else: assignment['state'] = 'not_marked' db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == 'this_week': pass elif time_range == 'this_month': pass elif time_range == 'all': pass else: return make_response('peticion invalida', 406) elif session['account_type'] == 'Docente': return make_response('ya nos jakiaron', 400) @app.route('/admin_add_teacher', methods=['POST']) def adminAddTeacher() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': data = request.get_json() db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'insert into Docente() values(?,?,?,?,?)' db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[ 'Apellido'], data['Usuario'], data['Contrasena'])) db_cursor.close() db_connection.close() return make_response('se agrego la entrada', 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_teacher_table', methods=['GET']) def adminGetTeacherTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Docente' db_cursor.execute(query) teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(teacher_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_table', methods=['GET']) def adminGetCourseTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select * from Curso' db_cursor.execute(query) course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio', 'FechaFin'), db_cursor.fetchall()) for course in course_table: course['FechaInicio'] = course['FechaInicio'].isoformat() course['FechaFin'] = course['FechaFin'].isoformat() db_cursor.close() db_connection.close() return make_response(jsonify(course_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_classroom_table', methods=['GET']) def adminGetClassroomTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = 'select Pabellon,Numero from Salon' db_cursor.execute(query) classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(classroom_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/admin_get_course_assignment_table', methods=['GET']) def adminGetCourseAssignmentTable() ->dict: if 'account_type' not in session: return make_response('', 401) elif session['account_type'] == 'Administrador': db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ( 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, 
s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)' ) db_cursor.execute(query) course_assignment_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero', 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall()) db_cursor.close() db_connection.close() return make_response(jsonify(course_assignment_table), 200) elif session['account_type'] == 'Docente': return make_response('', 401) @app.route('/logout', methods=['DELETE']) def logout() ->dict: if 'account_type' not in session: return make_response('primero inicia session broz', 301) elif session['account_type'] == 'Docente': session.pop('Usuario') session.pop('Nombre') session.pop('Apellido') return make_response('hasta luego prosor', 200) elif session['account_type'] == 'Administrador': session.pop('Usuario') return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) return make_response('espero haberle sido util, hasta luego', 200) <|reserved_special_token_1|> from flask import Flask from flask import request from flask import session from flask import jsonify from flask import make_response import mariadb import datetime import json import scad_utils testing: bool = True if testing: fake_datetime = datetime.datetime(2020, 8, 7, 15, 10) app = Flask(__name__) app.config["SECRET_KEY"] = "clave ultra secreta" app.permanent_session_lifetime = datetime.timedelta(minutes=20) teacher_time_tolerance = datetime.timedelta(minutes=20) db = mariadb.ConnectionPool( user="brocolio", password="brocolio", host="localhost", pool_name="pul", pool_size=20, database="scad", ) # tmp_cursor: mysql.cursor.MySQLCursor = db.cursor() # tmp_cursor.execute("SET lc_time_names = 'es_PE';") # tmp_cursor.close() spanish_days: dict = { "Monday": "lunes", "Tuesday": "martes", "Wednesday": "miércoles", "Thursday": "jueves", "Friday": "viernes", "Saturday": "sábado", "Sunday": "domingo", } json.JSONEncoder.default = lambda self, obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else str(obj) ) @app.route("/login", methods=["POST"]) def login() -> dict: db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) data: dict = request.get_json() # consulta a la base de datos si el usuario y contrasena son validos # consulta en la tabla docente query: str = ( "select DocenteDNI, Nombre, Apellido, Usuario " "from Docente " "where Usuario=? and Contrasena=?" ) db_cursor.execute(query, (data["Usuario"], data["Contrasena"])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session["account_type"] = "Docente" session["DocenteDNI"] = rows[0].DocenteDNI session["Nombre"] = rows[0].Nombre session["Apellido"] = rows[0].Apellido session["Usuario"] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({"account_type": session["account_type"]}, 200) else: # consulta en la tabla administrador query: str = ( "select Usuario,Contrasena " "from Administrador " "where Usuario=? and Contrasena=?" 
) db_cursor.execute(query, (data["Usuario"], data["Contrasena"])) rows = db_cursor.fetchall() if len(rows) == 1: session.permanent = True session["account_type"] = "Administrador" session["Usuario"] = rows[0].Usuario db_cursor.close() db_connection.close() return make_response({"account_type": session["account_type"]}, 200) # no se encontro nada else: db_cursor.close() db_connection.close() return make_response("pos a lo mejor se equivoco?", 401) @app.route("/teacher_fullname", methods=["GET"]) def teacherFullname() -> dict: if "account_type" not in session: return make_response("pa que quieres saber eso jaja salu2", 401) elif session["account_type"] == "Docente": return {"Nombre": session["Nombre"], "Apellido": session["Apellido"]} elif session["account_type"] == "Administrador": return make_response("wey no!!!", 400) @app.route("/time", methods=["GET"]) def time() -> dict: if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() return { "date": current_datetime.strftime("%d/%m/%Y"), "time": current_datetime.strftime("%H,%M,%S"), } @app.route("/teacher_course_list", methods=["GET"]) def teacherCourseList() -> list: # verificar la sesion if "account_type" not in session: # no inicio sesion return make_response("nope", 401) elif session["account_type"] == "Docente": # consultar la lista de cursos y si se han marcado o no # un curso marcado se diferencia porque el valor de Hora de la tabla Marcacion # es diferente de NULL if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor() db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( "select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero " "from AsignacionCurso a " "inner join Salon s using(SalonID) " "where Dia=dayname(?) and DocenteDNI=? " ) db_cursor.execute( query, (current_datetime.strftime("%Y/%m/%d"), session["DocenteDNI"]) ) today_assigned_courses: list = db_cursor.fetchall() # se formatea la lista de cursos today_assigned_courses = scad_utils.rowToDict( ( "AsignacionCursoID", "CursoNombre", "HoraInicio", "HoraFin", "Pabellon", "Numero", ), today_assigned_courses, ) if len(today_assigned_courses) > 0: existence_check_query: str = ( "select * from Marcacion " "where Fecha=? and AsignacionCursoID=?" 
) for course in today_assigned_courses: db_cursor.execute( existence_check_query, ( current_datetime.strftime("%Y/%m/%d"), course["AsignacionCursoID"], ), ) if len(db_cursor.fetchall()) > 0: course["state"] = "marked" else: if current_datetime >= scad_utils.timeToDatetime( course["HoraInicio"], current_datetime ): if ( current_datetime - scad_utils.timeToDatetime( course["HoraInicio"], current_datetime ) <= teacher_time_tolerance ): course["state"] = "mark_now" else: course["state"] = "not_marked" else: course["state"] = "waiting" db_cursor.close() db_connection.close() return jsonify(today_assigned_courses) elif session["account_type"] == "Administrador": # el administrador no deberia usar este servicio return make_response("ya nos jakiaron", 400) @app.route("/teacher_mark", methods=["POST"]) def teacherMark() -> dict: # validar si es posible marcar el registro del curso if "account_type" not in session: # no inicio sesion return make_response("stap", 401) elif session["account_type"] == "Docente": if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() # consultar si hay algun curso para marcar course_to_mark: dict db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") query: str = ( "select AsignacionCursoID,SalonID " "from AsignacionCurso " "where DocenteDNI=? " "and Dia=dayname(?) " "and HoraInicio <=? " "and timediff(?,HoraInicio)<=?;" ) db_cursor.execute( query, ( session["DocenteDNI"], current_datetime.strftime("%Y/%m/%d"), current_datetime.strftime("%H:%M:%S"), current_datetime.strftime("%H:%M:%S"), str(teacher_time_tolerance), ), ) course_to_mark = db_cursor.fetchall() if len(course_to_mark) == 1: insertion_query: str = ("insert into Marcacion() " "values(?,?,?,?);") db_cursor.execute( insertion_query, ( int(course_to_mark[0].AsignacionCursoID), current_datetime.strftime("%Y/%m/%d"), current_datetime.strftime("%H:%M:%S"), int(course_to_mark[0].SalonID), ), ) db_cursor.close() db_connection.close() return make_response("se marco la asistencia", 200) else: db_cursor.close() db_connection.close() return make_response("ya es tarde", 406) elif session["account_type"] == "Administrador": return make_response( "papu, si ya nos jakiaste por lo menos usa los servicios correctos no?", 400 ) @app.route("/admin_get_report", methods=["GET"]) def adminGetReport() -> list: if "account_type" not in session: # no inicio sesion return make_response("nope", 401) elif session["account_type"] == "Administrador": time_range = request.get_json()["time_range"] if testing: current_datetime = fake_datetime else: current_datetime = datetime.datetime.now() db_connection = db.get_connection() db_cursor = db_connection.cursor(named_tuple=True) db_cursor.execute("SET lc_time_names = 'es_PE'") report: list if time_range == "today": query: str = ( "select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, " "a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero " "from AsignacionCurso a " "inner join Salon s using(SalonID) " "inner join Docente d using(DocenteDNI) " "where Dia=dayname(?) and a.HoraInicio<? 
" ) db_cursor.execute( query, ( current_datetime.strftime("%Y-%m-%d"), current_datetime.strftime("%H:%M:%S"), ), ) report = db_cursor.fetchall() # se formatea la lista de cursos report = scad_utils.rowToDict( ( "AsignacionCursoID", "DocenteDNI", "Nombre", "Apellido", "CursoNombre", "HoraInicio", "HoraFin", "Pabellon", "Numero", ), report, ) if len(report) > 0: existence_check_query: str = ( "select * from Marcacion " "where Fecha=? and AsignacionCursoID=?" ) for assignment in report: db_cursor.execute( existence_check_query, ( current_datetime.strftime("%Y-%m-%d"), assignment["AsignacionCursoID"], ), ) if len(db_cursor.fetchall()) > 0: assignment["state"] = "marked" else: assignment["state"] = "not_marked" db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == "yesterday": query: str = ( "select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, " "a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero " "from AsignacionCurso a " "inner join Salon s using(SalonID) " "inner join Docente d using(DocenteDNI) " "where Dia=dayname(?)" ) current_datetime -= datetime.timedelta(days=1) db_cursor.execute( query, (current_datetime.strftime("%Y-%m-%d"),), ) report = db_cursor.fetchall() # se formatea la lista de cursos report = scad_utils.rowToDict( ( "AsignacionCursoID", "DocenteDNI", "Nombre", "Apellido", "CursoNombre", "HoraInicio", "HoraFin", "Pabellon", "Numero", ), report, ) if len(report) > 0: existence_check_query: str = ( "select * from Marcacion " "where Fecha=? and AsignacionCursoID=?" ) for assignment in report: db_cursor.execute( existence_check_query, ( current_datetime.strftime("%Y-%m-%d"), assignment["AsignacionCursoID"], ), ) if len(db_cursor.fetchall()) > 0: assignment["state"] = "marked" else: assignment["state"] = "not_marked" db_cursor.close() db_connection.close() return make_response(jsonify(report), 200) elif time_range == "this_week": pass elif time_range == "this_month": pass elif time_range == "all": pass else: return make_response("peticion invalida", 406) elif session["account_type"] == "Docente": # el administrador no deberia usar este servicio return make_response("ya nos jakiaron", 400) @app.route("/admin_add_teacher", methods=["POST"]) def adminAddTeacher() -> dict: if "account_type" not in session: return make_response("", 401) elif session["account_type"] == "Administrador": data = request.get_json() db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ("insert into Docente() values(?,?,?,?,?)") db_cursor.execute( query, ( data["DocenteDNI"], data["Nombre"], data["Apellido"], data["Usuario"], data["Contrasena"], ), ) db_cursor.close() db_connection.close() return make_response("se agrego la entrada", 200) elif session["account_type"] == "Docente": return make_response("", 401) @app.route("/admin_get_teacher_table", methods=["GET"]) def adminGetTeacherTable() -> dict: if "account_type" not in session: return make_response("", 401) elif session["account_type"] == "Administrador": db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ("select * from Docente") db_cursor.execute(query) teacher_table = scad_utils.rowToDict( ("DocenteDNI", "Nombre", "Apellido", "Usuario", "Contrasena"), db_cursor.fetchall(), ) db_cursor.close() db_connection.close() return make_response(jsonify(teacher_table), 200) elif session["account_type"] == "Docente": return make_response("", 401) @app.route("/admin_get_course_table", methods=["GET"]) def adminGetCourseTable() -> dict: 
if "account_type" not in session: return make_response("", 401) elif session["account_type"] == "Administrador": db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ("select * from Curso") db_cursor.execute(query) course_table = scad_utils.rowToDict( ("CursoNombre", "FechaInicio", "FechaFin"), db_cursor.fetchall(), ) for course in course_table: course["FechaInicio"] = course["FechaInicio"].isoformat() course["FechaFin"] = course["FechaFin"].isoformat() db_cursor.close() db_connection.close() return make_response(jsonify(course_table), 200) elif session["account_type"] == "Docente": return make_response("", 401) @app.route("/admin_get_classroom_table", methods=["GET"]) def adminGetClassroomTable() -> dict: if "account_type" not in session: return make_response("", 401) elif session["account_type"] == "Administrador": db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ("select Pabellon,Numero from Salon") db_cursor.execute(query) classroom_table = scad_utils.rowToDict( ("Pabellon", "Numero"), db_cursor.fetchall(), ) db_cursor.close() db_connection.close() return make_response(jsonify(classroom_table), 200) elif session["account_type"] == "Docente": return make_response("", 401) @app.route("/admin_get_course_assignment_table", methods=["GET"]) def adminGetCourseAssignmentTable() -> dict: if "account_type" not in session: return make_response("", 401) elif session["account_type"] == "Administrador": db_connection = db.get_connection() db_cursor = db_connection.cursor() query: str = ( "select d.DocenteDNI, d.Nombre, d.Apellido," "a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia " "from AsignacionCurso a " "inner join Salon s using(SalonID) " "inner join Docente d using(DocenteDNI)" ) db_cursor.execute(query) course_assignment_table = scad_utils.rowToDict( ( "DocenteDNI", "Nombre", "Apellido", "CursoNombre", "Pabellon", "Numero", "HoraInicio", "HoraFin", "Dia", ), db_cursor.fetchall(), ) db_cursor.close() db_connection.close() return make_response(jsonify(course_assignment_table), 200) elif session["account_type"] == "Docente": return make_response("", 401) @app.route("/logout", methods=["DELETE"]) def logout() -> dict: if "account_type" not in session: return make_response("primero inicia session broz", 301) else: if session["account_type"] == "Docente": session.pop("Usuario") session.pop("Nombre") session.pop("Apellido") return make_response("hasta luego prosor", 200) elif session["account_type"] == "Administrador": session.pop("Usuario") return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200) return make_response("espero haberle sido util, hasta luego", 200)
flexible
{ "blob_id": "ff6b7e2097d78b013f8f5989adee47156579cb9e", "index": 6226, "step-1": "<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login() ->dict:\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n data: dict = request.get_json()\n query: str = (\n 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Docente'\n session['DocenteDNI'] = rows[0].DocenteDNI\n session['Nombre'] = rows[0].Nombre\n session['Apellido'] = rows[0].Apellido\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200)\n else:\n query: str = (\n 'select Usuario,Contrasena from Administrador where Usuario=? and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Administrador'\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200\n )\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('pos a lo mejor se equivoco?', 401)\n\n\[email protected]('/teacher_fullname', methods=['GET'])\ndef teacherFullname() ->dict:\n if 'account_type' not in session:\n return make_response('pa que quieres saber eso jaja salu2', 401)\n elif session['account_type'] == 'Docente':\n return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']}\n elif session['account_type'] == 'Administrador':\n return make_response('wey no!!!', 400)\n\n\[email protected]('/time', methods=['GET'])\ndef time() ->dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {'date': current_datetime.strftime('%d/%m/%Y'), 'time':\n current_datetime.strftime('%H,%M,%S')}\n\n\[email protected]('/teacher_course_list', methods=['GET'])\ndef teacherCourseList() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'),\n session['DocenteDNI']))\n today_assigned_courses: list = db_cursor.fetchall()\n today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID',\n 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'),\n today_assigned_courses)\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? 
and AsignacionCursoID=?'\n )\n for course in today_assigned_courses:\n db_cursor.execute(existence_check_query, (current_datetime.\n strftime('%Y/%m/%d'), course['AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n course['state'] = 'marked'\n elif current_datetime >= scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime):\n if current_datetime - scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime\n ) <= teacher_time_tolerance:\n course['state'] = 'mark_now'\n else:\n course['state'] = 'not_marked'\n else:\n course['state'] = 'waiting'\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n elif session['account_type'] == 'Administrador':\n return make_response('ya nos jakiaron', 400)\n\n\n<mask token>\n\n\[email protected]('/admin_add_teacher', methods=['POST'])\ndef adminAddTeacher() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'insert into Docente() values(?,?,?,?,?)'\n db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[\n 'Apellido'], data['Usuario'], data['Contrasena']))\n db_cursor.close()\n db_connection.close()\n return make_response('se agrego la entrada', 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_teacher_table', methods=['GET'])\ndef adminGetTeacherTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Docente'\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre',\n 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_table', methods=['GET'])\ndef adminGetCourseTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Curso'\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio',\n 'FechaFin'), db_cursor.fetchall())\n for course in course_table:\n course['FechaInicio'] = course['FechaInicio'].isoformat()\n course['FechaFin'] = course['FechaFin'].isoformat()\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_classroom_table', methods=['GET'])\ndef adminGetClassroomTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select Pabellon,Numero from Salon'\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'),\n db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(classroom_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email 
protected]('/admin_get_course_assignment_table', methods=['GET'])\ndef adminGetCourseAssignmentTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = (\n 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)'\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(('DocenteDNI',\n 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero',\n 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/logout', methods=['DELETE'])\ndef logout() ->dict:\n if 'account_type' not in session:\n return make_response('primero inicia session broz', 301)\n elif session['account_type'] == 'Docente':\n session.pop('Usuario')\n session.pop('Nombre')\n session.pop('Apellido')\n return make_response('hasta luego prosor', 200)\n elif session['account_type'] == 'Administrador':\n session.pop('Usuario')\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n", "step-2": "<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login() ->dict:\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n data: dict = request.get_json()\n query: str = (\n 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Docente'\n session['DocenteDNI'] = rows[0].DocenteDNI\n session['Nombre'] = rows[0].Nombre\n session['Apellido'] = rows[0].Apellido\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200)\n else:\n query: str = (\n 'select Usuario,Contrasena from Administrador where Usuario=? 
and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Administrador'\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200\n )\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('pos a lo mejor se equivoco?', 401)\n\n\[email protected]('/teacher_fullname', methods=['GET'])\ndef teacherFullname() ->dict:\n if 'account_type' not in session:\n return make_response('pa que quieres saber eso jaja salu2', 401)\n elif session['account_type'] == 'Docente':\n return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']}\n elif session['account_type'] == 'Administrador':\n return make_response('wey no!!!', 400)\n\n\[email protected]('/time', methods=['GET'])\ndef time() ->dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {'date': current_datetime.strftime('%d/%m/%Y'), 'time':\n current_datetime.strftime('%H,%M,%S')}\n\n\[email protected]('/teacher_course_list', methods=['GET'])\ndef teacherCourseList() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'),\n session['DocenteDNI']))\n today_assigned_courses: list = db_cursor.fetchall()\n today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID',\n 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'),\n today_assigned_courses)\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? 
and AsignacionCursoID=?'\n )\n for course in today_assigned_courses:\n db_cursor.execute(existence_check_query, (current_datetime.\n strftime('%Y/%m/%d'), course['AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n course['state'] = 'marked'\n elif current_datetime >= scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime):\n if current_datetime - scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime\n ) <= teacher_time_tolerance:\n course['state'] = 'mark_now'\n else:\n course['state'] = 'not_marked'\n else:\n course['state'] = 'waiting'\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n elif session['account_type'] == 'Administrador':\n return make_response('ya nos jakiaron', 400)\n\n\n<mask token>\n\n\[email protected]('/admin_get_report', methods=['GET'])\ndef adminGetReport() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Administrador':\n time_range = request.get_json()['time_range']\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n report: list\n if time_range == 'today':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),\n current_datetime.strftime('%H:%M:%S')))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'yesterday':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)'\n )\n current_datetime -= datetime.timedelta(days=1)\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? 
and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'this_week':\n pass\n elif time_range == 'this_month':\n pass\n elif time_range == 'all':\n pass\n else:\n return make_response('peticion invalida', 406)\n elif session['account_type'] == 'Docente':\n return make_response('ya nos jakiaron', 400)\n\n\[email protected]('/admin_add_teacher', methods=['POST'])\ndef adminAddTeacher() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'insert into Docente() values(?,?,?,?,?)'\n db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[\n 'Apellido'], data['Usuario'], data['Contrasena']))\n db_cursor.close()\n db_connection.close()\n return make_response('se agrego la entrada', 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_teacher_table', methods=['GET'])\ndef adminGetTeacherTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Docente'\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre',\n 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_table', methods=['GET'])\ndef adminGetCourseTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Curso'\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio',\n 'FechaFin'), db_cursor.fetchall())\n for course in course_table:\n course['FechaInicio'] = course['FechaInicio'].isoformat()\n course['FechaFin'] = course['FechaFin'].isoformat()\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_classroom_table', methods=['GET'])\ndef adminGetClassroomTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select Pabellon,Numero from Salon'\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'),\n db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(classroom_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_assignment_table', methods=['GET'])\ndef adminGetCourseAssignmentTable() ->dict:\n if 'account_type' not in 
session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = (\n 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)'\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(('DocenteDNI',\n 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero',\n 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/logout', methods=['DELETE'])\ndef logout() ->dict:\n if 'account_type' not in session:\n return make_response('primero inicia session broz', 301)\n elif session['account_type'] == 'Docente':\n session.pop('Usuario')\n session.pop('Nombre')\n session.pop('Apellido')\n return make_response('hasta luego prosor', 200)\n elif session['account_type'] == 'Administrador':\n session.pop('Usuario')\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n", "step-3": "<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login() ->dict:\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n data: dict = request.get_json()\n query: str = (\n 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Docente'\n session['DocenteDNI'] = rows[0].DocenteDNI\n session['Nombre'] = rows[0].Nombre\n session['Apellido'] = rows[0].Apellido\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200)\n else:\n query: str = (\n 'select Usuario,Contrasena from Administrador where Usuario=? 
and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Administrador'\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200\n )\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('pos a lo mejor se equivoco?', 401)\n\n\[email protected]('/teacher_fullname', methods=['GET'])\ndef teacherFullname() ->dict:\n if 'account_type' not in session:\n return make_response('pa que quieres saber eso jaja salu2', 401)\n elif session['account_type'] == 'Docente':\n return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']}\n elif session['account_type'] == 'Administrador':\n return make_response('wey no!!!', 400)\n\n\[email protected]('/time', methods=['GET'])\ndef time() ->dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {'date': current_datetime.strftime('%d/%m/%Y'), 'time':\n current_datetime.strftime('%H,%M,%S')}\n\n\[email protected]('/teacher_course_list', methods=['GET'])\ndef teacherCourseList() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'),\n session['DocenteDNI']))\n today_assigned_courses: list = db_cursor.fetchall()\n today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID',\n 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'),\n today_assigned_courses)\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? and AsignacionCursoID=?'\n )\n for course in today_assigned_courses:\n db_cursor.execute(existence_check_query, (current_datetime.\n strftime('%Y/%m/%d'), course['AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n course['state'] = 'marked'\n elif current_datetime >= scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime):\n if current_datetime - scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime\n ) <= teacher_time_tolerance:\n course['state'] = 'mark_now'\n else:\n course['state'] = 'not_marked'\n else:\n course['state'] = 'waiting'\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n elif session['account_type'] == 'Administrador':\n return make_response('ya nos jakiaron', 400)\n\n\[email protected]('/teacher_mark', methods=['POST'])\ndef teacherMark() ->dict:\n if 'account_type' not in session:\n return make_response('stap', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n course_to_mark: dict\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID,SalonID from AsignacionCurso where DocenteDNI=? 
and Dia=dayname(?) and HoraInicio <=? and timediff(?,HoraInicio)<=?;'\n )\n db_cursor.execute(query, (session['DocenteDNI'], current_datetime.\n strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'),\n current_datetime.strftime('%H:%M:%S'), str(teacher_time_tolerance))\n )\n course_to_mark = db_cursor.fetchall()\n if len(course_to_mark) == 1:\n insertion_query: str = 'insert into Marcacion() values(?,?,?,?);'\n db_cursor.execute(insertion_query, (int(course_to_mark[0].\n AsignacionCursoID), current_datetime.strftime('%Y/%m/%d'),\n current_datetime.strftime('%H:%M:%S'), int(course_to_mark[0\n ].SalonID)))\n db_cursor.close()\n db_connection.close()\n return make_response('se marco la asistencia', 200)\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('ya es tarde', 406)\n elif session['account_type'] == 'Administrador':\n return make_response(\n 'papu, si ya nos jakiaste por lo menos usa los servicios correctos no?'\n , 400)\n\n\[email protected]('/admin_get_report', methods=['GET'])\ndef adminGetReport() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Administrador':\n time_range = request.get_json()['time_range']\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n report: list\n if time_range == 'today':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),\n current_datetime.strftime('%H:%M:%S')))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'yesterday':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)'\n )\n current_datetime -= datetime.timedelta(days=1)\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? 
and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'this_week':\n pass\n elif time_range == 'this_month':\n pass\n elif time_range == 'all':\n pass\n else:\n return make_response('peticion invalida', 406)\n elif session['account_type'] == 'Docente':\n return make_response('ya nos jakiaron', 400)\n\n\[email protected]('/admin_add_teacher', methods=['POST'])\ndef adminAddTeacher() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'insert into Docente() values(?,?,?,?,?)'\n db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[\n 'Apellido'], data['Usuario'], data['Contrasena']))\n db_cursor.close()\n db_connection.close()\n return make_response('se agrego la entrada', 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_teacher_table', methods=['GET'])\ndef adminGetTeacherTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Docente'\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre',\n 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_table', methods=['GET'])\ndef adminGetCourseTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Curso'\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio',\n 'FechaFin'), db_cursor.fetchall())\n for course in course_table:\n course['FechaInicio'] = course['FechaInicio'].isoformat()\n course['FechaFin'] = course['FechaFin'].isoformat()\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_classroom_table', methods=['GET'])\ndef adminGetClassroomTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select Pabellon,Numero from Salon'\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'),\n db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(classroom_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_assignment_table', methods=['GET'])\ndef adminGetCourseAssignmentTable() ->dict:\n if 'account_type' not in 
session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = (\n 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)'\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(('DocenteDNI',\n 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero',\n 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/logout', methods=['DELETE'])\ndef logout() ->dict:\n if 'account_type' not in session:\n return make_response('primero inicia session broz', 301)\n elif session['account_type'] == 'Docente':\n session.pop('Usuario')\n session.pop('Nombre')\n session.pop('Apellido')\n return make_response('hasta luego prosor', 200)\n elif session['account_type'] == 'Administrador':\n session.pop('Usuario')\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n", "step-4": "<mask token>\ntesting: bool = True\nif testing:\n fake_datetime = datetime.datetime(2020, 8, 7, 15, 10)\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'clave ultra secreta'\napp.permanent_session_lifetime = datetime.timedelta(minutes=20)\nteacher_time_tolerance = datetime.timedelta(minutes=20)\ndb = mariadb.ConnectionPool(user='brocolio', password='brocolio', host=\n 'localhost', pool_name='pul', pool_size=20, database='scad')\nspanish_days: dict = {'Monday': 'lunes', 'Tuesday': 'martes', 'Wednesday':\n 'miércoles', 'Thursday': 'jueves', 'Friday': 'viernes', 'Saturday':\n 'sábado', 'Sunday': 'domingo'}\njson.JSONEncoder.default = lambda self, obj: obj.isoformat() if isinstance(obj,\n datetime.datetime) or isinstance(obj, datetime.date) else str(obj)\n\n\[email protected]('/login', methods=['POST'])\ndef login() ->dict:\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n data: dict = request.get_json()\n query: str = (\n 'select DocenteDNI, Nombre, Apellido, Usuario from Docente where Usuario=? and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Docente'\n session['DocenteDNI'] = rows[0].DocenteDNI\n session['Nombre'] = rows[0].Nombre\n session['Apellido'] = rows[0].Apellido\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200)\n else:\n query: str = (\n 'select Usuario,Contrasena from Administrador where Usuario=? 
and Contrasena=?'\n )\n db_cursor.execute(query, (data['Usuario'], data['Contrasena']))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session['account_type'] = 'Administrador'\n session['Usuario'] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({'account_type': session['account_type']}, 200\n )\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('pos a lo mejor se equivoco?', 401)\n\n\[email protected]('/teacher_fullname', methods=['GET'])\ndef teacherFullname() ->dict:\n if 'account_type' not in session:\n return make_response('pa que quieres saber eso jaja salu2', 401)\n elif session['account_type'] == 'Docente':\n return {'Nombre': session['Nombre'], 'Apellido': session['Apellido']}\n elif session['account_type'] == 'Administrador':\n return make_response('wey no!!!', 400)\n\n\[email protected]('/time', methods=['GET'])\ndef time() ->dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {'date': current_datetime.strftime('%d/%m/%Y'), 'time':\n current_datetime.strftime('%H,%M,%S')}\n\n\[email protected]('/teacher_course_list', methods=['GET'])\ndef teacherCourseList() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) where Dia=dayname(?) and DocenteDNI=? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y/%m/%d'),\n session['DocenteDNI']))\n today_assigned_courses: list = db_cursor.fetchall()\n today_assigned_courses = scad_utils.rowToDict(('AsignacionCursoID',\n 'CursoNombre', 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'),\n today_assigned_courses)\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? and AsignacionCursoID=?'\n )\n for course in today_assigned_courses:\n db_cursor.execute(existence_check_query, (current_datetime.\n strftime('%Y/%m/%d'), course['AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n course['state'] = 'marked'\n elif current_datetime >= scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime):\n if current_datetime - scad_utils.timeToDatetime(course[\n 'HoraInicio'], current_datetime\n ) <= teacher_time_tolerance:\n course['state'] = 'mark_now'\n else:\n course['state'] = 'not_marked'\n else:\n course['state'] = 'waiting'\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n elif session['account_type'] == 'Administrador':\n return make_response('ya nos jakiaron', 400)\n\n\[email protected]('/teacher_mark', methods=['POST'])\ndef teacherMark() ->dict:\n if 'account_type' not in session:\n return make_response('stap', 401)\n elif session['account_type'] == 'Docente':\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n course_to_mark: dict\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n 'select AsignacionCursoID,SalonID from AsignacionCurso where DocenteDNI=? 
and Dia=dayname(?) and HoraInicio <=? and timediff(?,HoraInicio)<=?;'\n )\n db_cursor.execute(query, (session['DocenteDNI'], current_datetime.\n strftime('%Y/%m/%d'), current_datetime.strftime('%H:%M:%S'),\n current_datetime.strftime('%H:%M:%S'), str(teacher_time_tolerance))\n )\n course_to_mark = db_cursor.fetchall()\n if len(course_to_mark) == 1:\n insertion_query: str = 'insert into Marcacion() values(?,?,?,?);'\n db_cursor.execute(insertion_query, (int(course_to_mark[0].\n AsignacionCursoID), current_datetime.strftime('%Y/%m/%d'),\n current_datetime.strftime('%H:%M:%S'), int(course_to_mark[0\n ].SalonID)))\n db_cursor.close()\n db_connection.close()\n return make_response('se marco la asistencia', 200)\n else:\n db_cursor.close()\n db_connection.close()\n return make_response('ya es tarde', 406)\n elif session['account_type'] == 'Administrador':\n return make_response(\n 'papu, si ya nos jakiaste por lo menos usa los servicios correctos no?'\n , 400)\n\n\[email protected]('/admin_get_report', methods=['GET'])\ndef adminGetReport() ->list:\n if 'account_type' not in session:\n return make_response('nope', 401)\n elif session['account_type'] == 'Administrador':\n time_range = request.get_json()['time_range']\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n report: list\n if time_range == 'today':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?) and a.HoraInicio<? '\n )\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),\n current_datetime.strftime('%H:%M:%S')))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'yesterday':\n query: str = (\n 'select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI) where Dia=dayname(?)'\n )\n current_datetime -= datetime.timedelta(days=1)\n db_cursor.execute(query, (current_datetime.strftime('%Y-%m-%d'),))\n report = db_cursor.fetchall()\n report = scad_utils.rowToDict(('AsignacionCursoID',\n 'DocenteDNI', 'Nombre', 'Apellido', 'CursoNombre',\n 'HoraInicio', 'HoraFin', 'Pabellon', 'Numero'), report)\n if len(report) > 0:\n existence_check_query: str = (\n 'select * from Marcacion where Fecha=? 
and AsignacionCursoID=?'\n )\n for assignment in report:\n db_cursor.execute(existence_check_query, (\n current_datetime.strftime('%Y-%m-%d'), assignment[\n 'AsignacionCursoID']))\n if len(db_cursor.fetchall()) > 0:\n assignment['state'] = 'marked'\n else:\n assignment['state'] = 'not_marked'\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == 'this_week':\n pass\n elif time_range == 'this_month':\n pass\n elif time_range == 'all':\n pass\n else:\n return make_response('peticion invalida', 406)\n elif session['account_type'] == 'Docente':\n return make_response('ya nos jakiaron', 400)\n\n\[email protected]('/admin_add_teacher', methods=['POST'])\ndef adminAddTeacher() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'insert into Docente() values(?,?,?,?,?)'\n db_cursor.execute(query, (data['DocenteDNI'], data['Nombre'], data[\n 'Apellido'], data['Usuario'], data['Contrasena']))\n db_cursor.close()\n db_connection.close()\n return make_response('se agrego la entrada', 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_teacher_table', methods=['GET'])\ndef adminGetTeacherTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Docente'\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(('DocenteDNI', 'Nombre',\n 'Apellido', 'Usuario', 'Contrasena'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_table', methods=['GET'])\ndef adminGetCourseTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select * from Curso'\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(('CursoNombre', 'FechaInicio',\n 'FechaFin'), db_cursor.fetchall())\n for course in course_table:\n course['FechaInicio'] = course['FechaInicio'].isoformat()\n course['FechaFin'] = course['FechaFin'].isoformat()\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_classroom_table', methods=['GET'])\ndef adminGetClassroomTable() ->dict:\n if 'account_type' not in session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = 'select Pabellon,Numero from Salon'\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(('Pabellon', 'Numero'),\n db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(classroom_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/admin_get_course_assignment_table', methods=['GET'])\ndef adminGetCourseAssignmentTable() ->dict:\n if 'account_type' not in 
session:\n return make_response('', 401)\n elif session['account_type'] == 'Administrador':\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n query: str = (\n 'select d.DocenteDNI, d.Nombre, d.Apellido,a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia from AsignacionCurso a inner join Salon s using(SalonID) inner join Docente d using(DocenteDNI)'\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(('DocenteDNI',\n 'Nombre', 'Apellido', 'CursoNombre', 'Pabellon', 'Numero',\n 'HoraInicio', 'HoraFin', 'Dia'), db_cursor.fetchall())\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session['account_type'] == 'Docente':\n return make_response('', 401)\n\n\[email protected]('/logout', methods=['DELETE'])\ndef logout() ->dict:\n if 'account_type' not in session:\n return make_response('primero inicia session broz', 301)\n elif session['account_type'] == 'Docente':\n session.pop('Usuario')\n session.pop('Nombre')\n session.pop('Apellido')\n return make_response('hasta luego prosor', 200)\n elif session['account_type'] == 'Administrador':\n session.pop('Usuario')\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n return make_response('espero haberle sido util, hasta luego', 200)\n", "step-5": "from flask import Flask\nfrom flask import request\nfrom flask import session\nfrom flask import jsonify\nfrom flask import make_response\nimport mariadb\nimport datetime\nimport json\nimport scad_utils\n\ntesting: bool = True\nif testing:\n fake_datetime = datetime.datetime(2020, 8, 7, 15, 10)\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"clave ultra secreta\"\napp.permanent_session_lifetime = datetime.timedelta(minutes=20)\n\nteacher_time_tolerance = datetime.timedelta(minutes=20)\ndb = mariadb.ConnectionPool(\n user=\"brocolio\",\n password=\"brocolio\",\n host=\"localhost\",\n pool_name=\"pul\",\n pool_size=20,\n database=\"scad\",\n)\n\n# tmp_cursor: mysql.cursor.MySQLCursor = db.cursor()\n# tmp_cursor.execute(\"SET lc_time_names = 'es_PE';\")\n# tmp_cursor.close()\nspanish_days: dict = {\n \"Monday\": \"lunes\",\n \"Tuesday\": \"martes\",\n \"Wednesday\": \"miércoles\",\n \"Thursday\": \"jueves\",\n \"Friday\": \"viernes\",\n \"Saturday\": \"sábado\",\n \"Sunday\": \"domingo\",\n}\n\n\njson.JSONEncoder.default = lambda self, obj: (\n obj.isoformat()\n if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)\n else str(obj)\n)\n\n\[email protected](\"/login\", methods=[\"POST\"])\ndef login() -> dict:\n\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n data: dict = request.get_json()\n\n # consulta a la base de datos si el usuario y contrasena son validos\n # consulta en la tabla docente\n query: str = (\n \"select DocenteDNI, Nombre, Apellido, Usuario \"\n \"from Docente \"\n \"where Usuario=? 
and Contrasena=?\"\n )\n db_cursor.execute(query, (data[\"Usuario\"], data[\"Contrasena\"]))\n rows = db_cursor.fetchall()\n if len(rows) == 1:\n session.permanent = True\n session[\"account_type\"] = \"Docente\"\n session[\"DocenteDNI\"] = rows[0].DocenteDNI\n session[\"Nombre\"] = rows[0].Nombre\n session[\"Apellido\"] = rows[0].Apellido\n session[\"Usuario\"] = rows[0].Usuario\n\n db_cursor.close()\n db_connection.close()\n return make_response({\"account_type\": session[\"account_type\"]}, 200)\n\n else:\n # consulta en la tabla administrador\n query: str = (\n \"select Usuario,Contrasena \"\n \"from Administrador \"\n \"where Usuario=? and Contrasena=?\"\n )\n db_cursor.execute(query, (data[\"Usuario\"], data[\"Contrasena\"]))\n rows = db_cursor.fetchall()\n\n if len(rows) == 1:\n session.permanent = True\n session[\"account_type\"] = \"Administrador\"\n session[\"Usuario\"] = rows[0].Usuario\n db_cursor.close()\n db_connection.close()\n return make_response({\"account_type\": session[\"account_type\"]}, 200)\n # no se encontro nada\n else:\n db_cursor.close()\n db_connection.close()\n return make_response(\"pos a lo mejor se equivoco?\", 401)\n\n\[email protected](\"/teacher_fullname\", methods=[\"GET\"])\ndef teacherFullname() -> dict:\n if \"account_type\" not in session:\n return make_response(\"pa que quieres saber eso jaja salu2\", 401)\n elif session[\"account_type\"] == \"Docente\":\n return {\"Nombre\": session[\"Nombre\"], \"Apellido\": session[\"Apellido\"]}\n elif session[\"account_type\"] == \"Administrador\":\n return make_response(\"wey no!!!\", 400)\n\n\[email protected](\"/time\", methods=[\"GET\"])\ndef time() -> dict:\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n return {\n \"date\": current_datetime.strftime(\"%d/%m/%Y\"),\n \"time\": current_datetime.strftime(\"%H,%M,%S\"),\n }\n\n\[email protected](\"/teacher_course_list\", methods=[\"GET\"])\ndef teacherCourseList() -> list:\n # verificar la sesion\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"nope\", 401)\n elif session[\"account_type\"] == \"Docente\":\n # consultar la lista de cursos y si se han marcado o no\n # un curso marcado se diferencia porque el valor de Hora de la tabla Marcacion\n # es diferente de NULL\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n \"select AsignacionCursoID, a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"where Dia=dayname(?) and DocenteDNI=? \"\n )\n db_cursor.execute(\n query, (current_datetime.strftime(\"%Y/%m/%d\"), session[\"DocenteDNI\"])\n )\n today_assigned_courses: list = db_cursor.fetchall()\n # se formatea la lista de cursos\n today_assigned_courses = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n today_assigned_courses,\n )\n if len(today_assigned_courses) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? 
and AsignacionCursoID=?\"\n )\n for course in today_assigned_courses:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y/%m/%d\"),\n course[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n course[\"state\"] = \"marked\"\n else:\n if current_datetime >= scad_utils.timeToDatetime(\n course[\"HoraInicio\"], current_datetime\n ):\n if (\n current_datetime\n - scad_utils.timeToDatetime(\n course[\"HoraInicio\"], current_datetime\n )\n <= teacher_time_tolerance\n ):\n course[\"state\"] = \"mark_now\"\n else:\n course[\"state\"] = \"not_marked\"\n else:\n course[\"state\"] = \"waiting\"\n\n db_cursor.close()\n db_connection.close()\n return jsonify(today_assigned_courses)\n\n elif session[\"account_type\"] == \"Administrador\":\n # el administrador no deberia usar este servicio\n return make_response(\"ya nos jakiaron\", 400)\n\n\[email protected](\"/teacher_mark\", methods=[\"POST\"])\ndef teacherMark() -> dict:\n # validar si es posible marcar el registro del curso\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"stap\", 401)\n elif session[\"account_type\"] == \"Docente\":\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n # consultar si hay algun curso para marcar\n course_to_mark: dict\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n query: str = (\n \"select AsignacionCursoID,SalonID \"\n \"from AsignacionCurso \"\n \"where DocenteDNI=? \"\n \"and Dia=dayname(?) \"\n \"and HoraInicio <=? \"\n \"and timediff(?,HoraInicio)<=?;\"\n )\n db_cursor.execute(\n query,\n (\n session[\"DocenteDNI\"],\n current_datetime.strftime(\"%Y/%m/%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n str(teacher_time_tolerance),\n ),\n )\n course_to_mark = db_cursor.fetchall()\n if len(course_to_mark) == 1:\n insertion_query: str = (\"insert into Marcacion() \" \"values(?,?,?,?);\")\n\n db_cursor.execute(\n insertion_query,\n (\n int(course_to_mark[0].AsignacionCursoID),\n current_datetime.strftime(\"%Y/%m/%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n int(course_to_mark[0].SalonID),\n ),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(\"se marco la asistencia\", 200)\n else:\n db_cursor.close()\n db_connection.close()\n return make_response(\"ya es tarde\", 406)\n\n elif session[\"account_type\"] == \"Administrador\":\n return make_response(\n \"papu, si ya nos jakiaste por lo menos usa los servicios correctos no?\", 400\n )\n\n\[email protected](\"/admin_get_report\", methods=[\"GET\"])\ndef adminGetReport() -> list:\n if \"account_type\" not in session:\n # no inicio sesion\n return make_response(\"nope\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n time_range = request.get_json()[\"time_range\"]\n if testing:\n current_datetime = fake_datetime\n else:\n current_datetime = datetime.datetime.now()\n\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor(named_tuple=True)\n db_cursor.execute(\"SET lc_time_names = 'es_PE'\")\n report: list\n if time_range == \"today\":\n query: str = (\n \"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, \"\n \"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI) \"\n \"where Dia=dayname(?) 
and a.HoraInicio<? \"\n )\n db_cursor.execute(\n query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n current_datetime.strftime(\"%H:%M:%S\"),\n ),\n )\n report = db_cursor.fetchall()\n # se formatea la lista de cursos\n report = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n report,\n )\n if len(report) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? and AsignacionCursoID=?\"\n )\n for assignment in report:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n assignment[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n assignment[\"state\"] = \"marked\"\n else:\n assignment[\"state\"] = \"not_marked\"\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == \"yesterday\":\n query: str = (\n \"select a.AsignacionCursoID,d.DocenteDNI,d.Nombre,d.Apellido, \"\n \"a.CursoNombre, a.HoraInicio, a.HoraFin, s.Pabellon, s.Numero \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI) \"\n \"where Dia=dayname(?)\"\n )\n current_datetime -= datetime.timedelta(days=1)\n db_cursor.execute(\n query, (current_datetime.strftime(\"%Y-%m-%d\"),),\n )\n report = db_cursor.fetchall()\n # se formatea la lista de cursos\n report = scad_utils.rowToDict(\n (\n \"AsignacionCursoID\",\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Pabellon\",\n \"Numero\",\n ),\n report,\n )\n if len(report) > 0:\n existence_check_query: str = (\n \"select * from Marcacion \" \"where Fecha=? 
and AsignacionCursoID=?\"\n )\n for assignment in report:\n db_cursor.execute(\n existence_check_query,\n (\n current_datetime.strftime(\"%Y-%m-%d\"),\n assignment[\"AsignacionCursoID\"],\n ),\n )\n if len(db_cursor.fetchall()) > 0:\n assignment[\"state\"] = \"marked\"\n else:\n assignment[\"state\"] = \"not_marked\"\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(report), 200)\n elif time_range == \"this_week\":\n pass\n elif time_range == \"this_month\":\n pass\n elif time_range == \"all\":\n pass\n else:\n return make_response(\"peticion invalida\", 406)\n elif session[\"account_type\"] == \"Docente\":\n # el administrador no deberia usar este servicio\n return make_response(\"ya nos jakiaron\", 400)\n\n\[email protected](\"/admin_add_teacher\", methods=[\"POST\"])\ndef adminAddTeacher() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n data = request.get_json()\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"insert into Docente() values(?,?,?,?,?)\")\n db_cursor.execute(\n query,\n (\n data[\"DocenteDNI\"],\n data[\"Nombre\"],\n data[\"Apellido\"],\n data[\"Usuario\"],\n data[\"Contrasena\"],\n ),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(\"se agrego la entrada\", 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_teacher_table\", methods=[\"GET\"])\ndef adminGetTeacherTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select * from Docente\")\n db_cursor.execute(query)\n teacher_table = scad_utils.rowToDict(\n (\"DocenteDNI\", \"Nombre\", \"Apellido\", \"Usuario\", \"Contrasena\"),\n db_cursor.fetchall(),\n )\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(teacher_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_course_table\", methods=[\"GET\"])\ndef adminGetCourseTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select * from Curso\")\n db_cursor.execute(query)\n course_table = scad_utils.rowToDict(\n (\"CursoNombre\", \"FechaInicio\", \"FechaFin\"), db_cursor.fetchall(),\n )\n for course in course_table:\n course[\"FechaInicio\"] = course[\"FechaInicio\"].isoformat()\n course[\"FechaFin\"] = course[\"FechaFin\"].isoformat()\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_classroom_table\", methods=[\"GET\"])\ndef adminGetClassroomTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\"select Pabellon,Numero from Salon\")\n db_cursor.execute(query)\n classroom_table = scad_utils.rowToDict(\n (\"Pabellon\", \"Numero\"), db_cursor.fetchall(),\n )\n db_cursor.close()\n db_connection.close()\n return 
make_response(jsonify(classroom_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/admin_get_course_assignment_table\", methods=[\"GET\"])\ndef adminGetCourseAssignmentTable() -> dict:\n if \"account_type\" not in session:\n return make_response(\"\", 401)\n elif session[\"account_type\"] == \"Administrador\":\n db_connection = db.get_connection()\n db_cursor = db_connection.cursor()\n\n query: str = (\n \"select d.DocenteDNI, d.Nombre, d.Apellido,\"\n \"a.CursoNombre, s.Pabellon,s.Numero, a.HoraInicio, a.HoraFin,a.Dia \"\n \"from AsignacionCurso a \"\n \"inner join Salon s using(SalonID) \"\n \"inner join Docente d using(DocenteDNI)\"\n )\n db_cursor.execute(query)\n course_assignment_table = scad_utils.rowToDict(\n (\n \"DocenteDNI\",\n \"Nombre\",\n \"Apellido\",\n \"CursoNombre\",\n \"Pabellon\",\n \"Numero\",\n \"HoraInicio\",\n \"HoraFin\",\n \"Dia\",\n ),\n db_cursor.fetchall(),\n )\n\n db_cursor.close()\n db_connection.close()\n return make_response(jsonify(course_assignment_table), 200)\n elif session[\"account_type\"] == \"Docente\":\n return make_response(\"\", 401)\n\n\[email protected](\"/logout\", methods=[\"DELETE\"])\ndef logout() -> dict:\n if \"account_type\" not in session:\n return make_response(\"primero inicia session broz\", 301)\n else:\n if session[\"account_type\"] == \"Docente\":\n session.pop(\"Usuario\")\n session.pop(\"Nombre\")\n session.pop(\"Apellido\")\n return make_response(\"hasta luego prosor\", 200)\n elif session[\"account_type\"] == \"Administrador\":\n session.pop(\"Usuario\")\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n return make_response(\"espero haberle sido util, hasta luego\", 200)\n", "step-ids": [ 10, 11, 12, 14, 16 ] }
[ 10, 11, 12, 14, 16 ]
from vkaudiotoken import get_vk_official_token
import requests
import json
import telebot
import urllib
import sys

#check start args
try:
    if len(sys.argv) != 4:
        raise Exception

    botApiKey = sys.argv[1]
    login = sys.argv[2]
    password = sys.argv[3]
except:
    print('Not enough arguments')
    print('Example: py filename.py botApiKey login password')
    print('')
    sys.exit()

#check apikey
try:
    bot = telebot.TeleBot(botApiKey)
except:
    print('Bot Error: Check botApiKey')
    print('')
    sys.exit()

#check vk auth
try:
    tokenObj = get_vk_official_token(login, password)
except:
    print('Login Error: Check login and password')
    print('')
    sys.exit()

#create vk session
token = tokenObj['token']
user_agent = tokenObj['user_agent']

sess = requests.session()
sess.headers.update({'User-Agent': user_agent})

#trackList transform
def getTracks(result):
    data = json.loads(result.content.decode('utf-8'))
    tracks = data['response']['items']
    tracks.reverse()
    return tracks

#m3u8 url convet to mp3 url
def getMp3FromM3u8(url):
    if url.find("index.m3u8?") == -1:
        return url
    parts = url.split('/')
    newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5] + '.mp3'
    return newUrl

#telegram bot
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    if message.text == "/start":
        bot.send_message(message.from_user.id, "Moscow Music Bot. Введите число треков")
    elif message.text == "/help":
        bot.send_message(message.from_user.id, "Введите число треков")
    else:
        try:
            count = int(message.text)

            tracks = getTracks(sess.get(
                "https://api.vk.com/method/audio.get",
                params=[('access_token', token),
                        ('count', count),
                        ('v', '5.95')]
            ))

            for track in tracks:
                title = track['title']
                artist = track['artist']
                duration = track['duration']
                url = getMp3FromM3u8(track['url'])
                file = urllib.request.urlopen(url)
                try:
                    bot.send_audio(message.from_user.id, file, duration=duration, title=title, performer=artist)
                except:
                    bot.send_message(message.from_user.id, "Ошибка загрузки {}".format(title))
        except:
            bot.send_message(message.from_user.id, "Ошибка исполнения")

bot.infinity_polling()
normal
{ "blob_id": "47817d6cf58ac54e501ed24ae3ababc821bdd5c8", "index": 1949, "step-1": "<mask token>\n\n\ndef getTracks(result):\n data = json.loads(result.content.decode('utf-8'))\n tracks = data['response']['items']\n tracks.reverse()\n return tracks\n\n\ndef getMp3FromM3u8(url):\n if url.find('index.m3u8?') == -1:\n return url\n parts = url.split('/')\n newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5\n ] + '.mp3'\n return newUrl\n\n\[email protected]_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == '/start':\n bot.send_message(message.from_user.id,\n 'Moscow Music Bot. Введите число треков')\n elif message.text == '/help':\n bot.send_message(message.from_user.id, 'Введите число треков')\n else:\n try:\n count = int(message.text)\n tracks = getTracks(sess.get(\n 'https://api.vk.com/method/audio.get', params=[(\n 'access_token', token), ('count', count), ('v', '5.95')]))\n for track in tracks:\n title = track['title']\n artist = track['artist']\n duration = track['duration']\n url = getMp3FromM3u8(track['url'])\n file = urllib.request.urlopen(url)\n try:\n bot.send_audio(message.from_user.id, file, duration=\n duration, title=title, performer=artist)\n except:\n bot.send_message(message.from_user.id,\n 'Ошибка загрузки {}'.format(title))\n except:\n bot.send_message(message.from_user.id, 'Ошибка исполнения')\n\n\n<mask token>\n", "step-2": "<mask token>\ntry:\n if len(sys.argv) != 4:\n raise Exception\n botApiKey = sys.argv[1]\n login = sys.argv[2]\n password = sys.argv[3]\nexcept:\n print('Not enough arguments')\n print('Example: py filename.py botApiKey login password')\n print('')\n sys.exit()\ntry:\n bot = telebot.TeleBot(botApiKey)\nexcept:\n print('Bot Error: Check botApiKey')\n print('')\n sys.exit()\ntry:\n tokenObj = get_vk_official_token(login, password)\nexcept:\n print('Login Error: Check login and password')\n print('')\n sys.exit()\n<mask token>\nsess.headers.update({'User-Agent': user_agent})\n\n\ndef getTracks(result):\n data = json.loads(result.content.decode('utf-8'))\n tracks = data['response']['items']\n tracks.reverse()\n return tracks\n\n\ndef getMp3FromM3u8(url):\n if url.find('index.m3u8?') == -1:\n return url\n parts = url.split('/')\n newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5\n ] + '.mp3'\n return newUrl\n\n\[email protected]_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == '/start':\n bot.send_message(message.from_user.id,\n 'Moscow Music Bot. 
Введите число треков')\n elif message.text == '/help':\n bot.send_message(message.from_user.id, 'Введите число треков')\n else:\n try:\n count = int(message.text)\n tracks = getTracks(sess.get(\n 'https://api.vk.com/method/audio.get', params=[(\n 'access_token', token), ('count', count), ('v', '5.95')]))\n for track in tracks:\n title = track['title']\n artist = track['artist']\n duration = track['duration']\n url = getMp3FromM3u8(track['url'])\n file = urllib.request.urlopen(url)\n try:\n bot.send_audio(message.from_user.id, file, duration=\n duration, title=title, performer=artist)\n except:\n bot.send_message(message.from_user.id,\n 'Ошибка загрузки {}'.format(title))\n except:\n bot.send_message(message.from_user.id, 'Ошибка исполнения')\n\n\nbot.infinity_polling()\n", "step-3": "<mask token>\ntry:\n if len(sys.argv) != 4:\n raise Exception\n botApiKey = sys.argv[1]\n login = sys.argv[2]\n password = sys.argv[3]\nexcept:\n print('Not enough arguments')\n print('Example: py filename.py botApiKey login password')\n print('')\n sys.exit()\ntry:\n bot = telebot.TeleBot(botApiKey)\nexcept:\n print('Bot Error: Check botApiKey')\n print('')\n sys.exit()\ntry:\n tokenObj = get_vk_official_token(login, password)\nexcept:\n print('Login Error: Check login and password')\n print('')\n sys.exit()\ntoken = tokenObj['token']\nuser_agent = tokenObj['user_agent']\nsess = requests.session()\nsess.headers.update({'User-Agent': user_agent})\n\n\ndef getTracks(result):\n data = json.loads(result.content.decode('utf-8'))\n tracks = data['response']['items']\n tracks.reverse()\n return tracks\n\n\ndef getMp3FromM3u8(url):\n if url.find('index.m3u8?') == -1:\n return url\n parts = url.split('/')\n newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5\n ] + '.mp3'\n return newUrl\n\n\[email protected]_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == '/start':\n bot.send_message(message.from_user.id,\n 'Moscow Music Bot. 
Введите число треков')\n elif message.text == '/help':\n bot.send_message(message.from_user.id, 'Введите число треков')\n else:\n try:\n count = int(message.text)\n tracks = getTracks(sess.get(\n 'https://api.vk.com/method/audio.get', params=[(\n 'access_token', token), ('count', count), ('v', '5.95')]))\n for track in tracks:\n title = track['title']\n artist = track['artist']\n duration = track['duration']\n url = getMp3FromM3u8(track['url'])\n file = urllib.request.urlopen(url)\n try:\n bot.send_audio(message.from_user.id, file, duration=\n duration, title=title, performer=artist)\n except:\n bot.send_message(message.from_user.id,\n 'Ошибка загрузки {}'.format(title))\n except:\n bot.send_message(message.from_user.id, 'Ошибка исполнения')\n\n\nbot.infinity_polling()\n", "step-4": "from vkaudiotoken import get_vk_official_token\nimport requests\nimport json\nimport telebot\nimport urllib\nimport sys\ntry:\n if len(sys.argv) != 4:\n raise Exception\n botApiKey = sys.argv[1]\n login = sys.argv[2]\n password = sys.argv[3]\nexcept:\n print('Not enough arguments')\n print('Example: py filename.py botApiKey login password')\n print('')\n sys.exit()\ntry:\n bot = telebot.TeleBot(botApiKey)\nexcept:\n print('Bot Error: Check botApiKey')\n print('')\n sys.exit()\ntry:\n tokenObj = get_vk_official_token(login, password)\nexcept:\n print('Login Error: Check login and password')\n print('')\n sys.exit()\ntoken = tokenObj['token']\nuser_agent = tokenObj['user_agent']\nsess = requests.session()\nsess.headers.update({'User-Agent': user_agent})\n\n\ndef getTracks(result):\n data = json.loads(result.content.decode('utf-8'))\n tracks = data['response']['items']\n tracks.reverse()\n return tracks\n\n\ndef getMp3FromM3u8(url):\n if url.find('index.m3u8?') == -1:\n return url\n parts = url.split('/')\n newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5\n ] + '.mp3'\n return newUrl\n\n\[email protected]_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == '/start':\n bot.send_message(message.from_user.id,\n 'Moscow Music Bot. 
Введите число треков')\n elif message.text == '/help':\n bot.send_message(message.from_user.id, 'Введите число треков')\n else:\n try:\n count = int(message.text)\n tracks = getTracks(sess.get(\n 'https://api.vk.com/method/audio.get', params=[(\n 'access_token', token), ('count', count), ('v', '5.95')]))\n for track in tracks:\n title = track['title']\n artist = track['artist']\n duration = track['duration']\n url = getMp3FromM3u8(track['url'])\n file = urllib.request.urlopen(url)\n try:\n bot.send_audio(message.from_user.id, file, duration=\n duration, title=title, performer=artist)\n except:\n bot.send_message(message.from_user.id,\n 'Ошибка загрузки {}'.format(title))\n except:\n bot.send_message(message.from_user.id, 'Ошибка исполнения')\n\n\nbot.infinity_polling()\n", "step-5": "from vkaudiotoken import get_vk_official_token\nimport requests\nimport json\nimport telebot\nimport urllib\nimport sys\n\n#check start args\ntry:\n if len(sys.argv) != 4:\n raise Exception\n\n botApiKey = sys.argv[1]\n login = sys.argv[2]\n password = sys.argv[3]\nexcept: \n print('Not enough arguments')\n print('Example: py filename.py botApiKey login password')\n print('')\n sys.exit()\n\n#check apikey\ntry:\n bot = telebot.TeleBot(botApiKey)\nexcept:\n print('Bot Error: Check botApiKey')\n print('')\n sys.exit()\n\n#check vk auth\ntry: \n tokenObj = get_vk_official_token(login, password)\nexcept:\n print('Login Error: Check login and password')\n print('')\n sys.exit()\n\n#create vk session\ntoken = tokenObj['token']\nuser_agent = tokenObj['user_agent']\n\nsess = requests.session()\nsess.headers.update({'User-Agent': user_agent})\n\n#trackList transform\ndef getTracks(result):\n data = json.loads(result.content.decode('utf-8'))\n tracks = data['response']['items']\n tracks.reverse()\n return tracks\n\n#m3u8 url convet to mp3 url\ndef getMp3FromM3u8(url):\n if url.find(\"index.m3u8?\") == -1:\n return url\n parts = url.split('/')\n newUrl = parts[0] + '//' + parts[2] + '/' + parts[3] + '/' + parts[5] + '.mp3'\n return newUrl\n\n#telegram bot\[email protected]_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == \"/start\":\n bot.send_message(message.from_user.id, \"Moscow Music Bot. Введите число треков\")\n elif message.text == \"/help\":\n bot.send_message(message.from_user.id, \"Введите число треков\")\n else:\n try:\n count = int(message.text) \n\n tracks = getTracks(sess.get(\n \"https://api.vk.com/method/audio.get\",\n params=[('access_token', token),\n ('count', count),\n ('v', '5.95')] \n ))\n\n for track in tracks:\n title = track['title']\n artist = track['artist']\n duration = track['duration']\n url = getMp3FromM3u8(track['url'])\n file = urllib.request.urlopen(url)\n try:\n bot.send_audio(message.from_user.id, file, duration=duration, title=title, performer=artist)\n except:\n bot.send_message(message.from_user.id, \"Ошибка загрузки {}\".format(title))\n except:\n bot.send_message(message.from_user.id, \"Ошибка исполнения\")\n\nbot.infinity_polling()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
""" This program takes information about students and their coursework and calculates their final grades based on the weight of each course factor """ def read_file(string_object): """ Opens and reads through a file, returning none if it isnt found """ try: return open(string_object,"r") except FileNotFoundError: return None def populate_weight_list(file_object): """ Takes information from a file object containing course weights and puts it into a list """ new_list = [] for line in file_object: new_list.append(line.split()) return new_list def populate_grades_list(file_object): """ Takes information from a file containing student emails and grades and puts each in seperate lists """ email_list = [] grade_list = [] for line in file_object: tmp_list = line.split() email_list.append(tmp_list[0]) grade_list.append(tmp_list[1::]) for value_list in grade_list: for i, value in enumerate(value_list): value_list[i] = float(value) return email_list, grade_list def populate_weight_tuple_list(list_object): """ Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """ tuple_list = [] for i in range(len(list_object[0])): weight_tuple = (list_object[0][i], float(list_object[1][i])) tuple_list.append(weight_tuple) return tuple_list def populate_grades_tuple_list(list_object1, list_object2): """ Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """ tuple_list = [] for i in range(len(list_object1)): grades_tuple = (list_object1[i], list_object2[i]) tuple_list.append(grades_tuple) return tuple_list def calculate_final_grade(list_object1, list_object2): """ Takes lists containing information about grades and course weights and calculates the final grade from the course """ list_object1 = [list(element) for element in list_object1] #Have to turn the tuples in the list to lists so that we can add the final grade to the list for i in range(len(list_object1)): final_grade = 0.0 for j in range(len(list_object1[i][1])): final_grade += (list_object1[i][1][j] * list_object2[j][1]) list_object1[i].append(final_grade) list_object1 = [tuple(element) for element in list_object1] #Turn the lists in the list into tuples again return list_object1 def print_results(list_object1, list_object2): """ Takes lists containing information about course parts and student grades and prints them in a formatted menu """ STUDENT_COLUMN = 16 GENERAL_COLUMN = 14 print() print("{:>{}}".format("Student ID",STUDENT_COLUMN),end="") for i in range(len(list_object1)): print("{:>{}}".format(list_object1[i][0],GENERAL_COLUMN),end="") print("{:>{}}".format("Course grade",GENERAL_COLUMN)) for tuple_element in list_object2: print("{:>{}}".format(tuple_element[0],STUDENT_COLUMN),end="") for i, value in enumerate(tuple_element[1]): print("{:>{}}".format(value,GENERAL_COLUMN),end="") print("{:>{}}".format(round(tuple_element[-1],2),GENERAL_COLUMN)) def main_func(): """ Main function """ parts_file_name = input("Enter filename for parts: ") parts_file = read_file(parts_file_name) if parts_file == None: print("File {} not found".format(parts_file_name)) else: parts_list = populate_weight_list(parts_file) weight_tuples_list = populate_weight_tuple_list(parts_list) print(weight_tuples_list) grades_file_name = input("Enter filename for grades: ") grade_file = read_file(grades_file_name) if grade_file == None: print("File {} not found".format(grades_file_name)) else: email_list, grades_list 
= populate_grades_list(grade_file) grades_tuple_list = populate_grades_tuple_list(email_list, grades_list) print(grades_tuple_list) modified_grade_tuple_list = calculate_final_grade(grades_tuple_list, weight_tuples_list) print(modified_grade_tuple_list) print_results(weight_tuples_list,modified_grade_tuple_list) main_func()
normal
{ "blob_id": "d8af8e36bd00fbfc966ef1c4dd0c6385cbb019ee", "index": 2064, "step-1": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n 
print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1:])\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n return email_list, grade_list\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\ndef populate_weight_list(file_object):\n \"\"\" Takes information from a file object containing course weights and puts it into a list \"\"\"\n new_list = []\n for line in file_object:\n new_list.append(line.split())\n return 
new_list\n\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1:])\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n return email_list, grade_list\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\ndef main_func():\n \"\"\" Main function \"\"\"\n parts_file_name = input('Enter filename for parts: ')\n parts_file = read_file(parts_file_name)\n if parts_file == None:\n print('File {} not found'.format(parts_file_name))\n else:\n parts_list = populate_weight_list(parts_file)\n weight_tuples_list = populate_weight_tuple_list(parts_list)\n print(weight_tuples_list)\n grades_file_name = input('Enter filename for grades: ')\n grade_file = read_file(grades_file_name)\n if grade_file == None:\n print('File {} not found'.format(grades_file_name))\n else:\n email_list, grades_list = populate_grades_list(grade_file)\n grades_tuple_list = populate_grades_tuple_list(email_list,\n grades_list)\n print(grades_tuple_list)\n modified_grade_tuple_list = calculate_final_grade(grades_tuple_list\n , weight_tuples_list)\n print(modified_grade_tuple_list)\n print_results(weight_tuples_list, modified_grade_tuple_list)\n\n\n<mask token>\n", "step-5": "\"\"\"\nThis program takes information about students and their coursework and calculates their final grades based on the 
weight of each course factor\n\"\"\"\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object,\"r\")\n except FileNotFoundError:\n return None\n\ndef populate_weight_list(file_object):\n \"\"\" Takes information from a file object containing course weights and puts it into a list \"\"\"\n new_list = []\n\n for line in file_object:\n new_list.append(line.split())\n \n return new_list\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1::])\n\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n\n return email_list, grade_list\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n\n for i in range(len(list_object[0])):\n weight_tuple = (list_object[0][i], float(list_object[1][i]))\n tuple_list.append(weight_tuple)\n \n return tuple_list\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n\n for i in range(len(list_object1)):\n grades_tuple = (list_object1[i], list_object2[i])\n tuple_list.append(grades_tuple)\n \n return tuple_list\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n\n list_object1 = [list(element) for element in list_object1] #Have to turn the tuples in the list to lists so that we can add the final grade to the list\n\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += (list_object1[i][1][j] * list_object2[j][1])\n list_object1[i].append(final_grade)\n \n list_object1 = [tuple(element) for element in list_object1] #Turn the lists in the list into tuples again\n\n return list_object1\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))\n\n\ndef main_func():\n \"\"\" Main function \"\"\"\n\n parts_file_name = input(\"Enter filename for parts: \")\n parts_file = read_file(parts_file_name)\n\n if parts_file == None:\n print(\"File {} not found\".format(parts_file_name))\n else:\n parts_list = populate_weight_list(parts_file)\n weight_tuples_list = populate_weight_tuple_list(parts_list)\n print(weight_tuples_list)\n\n grades_file_name = 
input(\"Enter filename for grades: \")\n grade_file = read_file(grades_file_name)\n if grade_file == None:\n print(\"File {} not found\".format(grades_file_name))\n else:\n email_list, grades_list = populate_grades_list(grade_file)\n grades_tuple_list = populate_grades_tuple_list(email_list, grades_list)\n print(grades_tuple_list)\n\n modified_grade_tuple_list = calculate_final_grade(grades_tuple_list, weight_tuples_list)\n print(modified_grade_tuple_list)\n\n print_results(weight_tuples_list,modified_grade_tuple_list)\n\nmain_func() \n", "step-ids": [ 4, 5, 6, 8, 10 ] }
[ 4, 5, 6, 8, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|>


def load(indices, category='train'):
    if category == 'train':
        if max(indices) < len(X_train) and max(indices) < len(y_train):
            return X_train[indices], y_train[indices]
        else:
            l = np.array([a for a in indices if a < len(X_train) and a <
                len(y_train)], np.int64)
            return X_train[l], y_train[l]
    elif category == 'test':
        return X_test[indices], y_test[indices]
<|reserved_special_token_0|>
pwd = os.path.dirname(os.path.realpath(__file__))
train_data = np.load(os.path.join(pwd, 'purchase2_train.npy'), allow_pickle
    =True)
test_data = np.load(os.path.join(pwd, 'purchase2_test.npy'), allow_pickle=True)
train_data = train_data.reshape((1,))[0]
test_data = test_data.reshape((1,))[0]
X_train = train_data['X'].astype(np.float32)
X_test = test_data['X'].astype(np.float32)
y_train = train_data['y'].astype(np.int64)
y_test = test_data['y'].astype(np.int64)


def load(indices, category='train'):
    if category == 'train':
        if max(indices) < len(X_train) and max(indices) < len(y_train):
            return X_train[indices], y_train[indices]
        else:
            l = np.array([a for a in indices if a < len(X_train) and a <
                len(y_train)], np.int64)
            return X_train[l], y_train[l]
    elif category == 'test':
        return X_test[indices], y_test[indices]
import numpy as np
import os
pwd = os.path.dirname(os.path.realpath(__file__))
train_data = np.load(os.path.join(pwd, 'purchase2_train.npy'), allow_pickle
    =True)
test_data = np.load(os.path.join(pwd, 'purchase2_test.npy'), allow_pickle=True)
train_data = train_data.reshape((1,))[0]
test_data = test_data.reshape((1,))[0]
X_train = train_data['X'].astype(np.float32)
X_test = test_data['X'].astype(np.float32)
y_train = train_data['y'].astype(np.int64)
y_test = test_data['y'].astype(np.int64)


def load(indices, category='train'):
    if category == 'train':
        if max(indices) < len(X_train) and max(indices) < len(y_train):
            return X_train[indices], y_train[indices]
        else:
            l = np.array([a for a in indices if a < len(X_train) and a <
                len(y_train)], np.int64)
            return X_train[l], y_train[l]
    elif category == 'test':
        return X_test[indices], y_test[indices]
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> {'variables': {'chromium_code': 1}, 'includes': ['ots-common.gypi'], 'targets': [{'target_name': 'ots', 'type': '<(library)', 'sources': [ '<@(ots_sources)'], 'include_dirs': ['<@(ots_include_dirs)'], 'direct_dependent_settings': {'include_dirs': ['<@(ots_include_dirs)']}, 'dependencies': ['../zlib/zlib.gyp:zlib']}]} <|reserved_special_token_1|> # Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'chromium_code': 1, }, 'includes': [ 'ots-common.gypi', ], 'targets': [ { 'target_name': 'ots', 'type': '<(library)', 'sources': [ '<@(ots_sources)', ], 'include_dirs': [ '<@(ots_include_dirs)', ], 'direct_dependent_settings': { 'include_dirs': [ '<@(ots_include_dirs)', ], }, 'dependencies': [ '../zlib/zlib.gyp:zlib', ], }, ], }
flexible
{ "blob_id": "7413d4e98f79bf7b389a6305257833293714fc81", "index": 1786, "step-1": "<mask token>\n", "step-2": "{'variables': {'chromium_code': 1}, 'includes': ['ots-common.gypi'],\n 'targets': [{'target_name': 'ots', 'type': '<(library)', 'sources': [\n '<@(ots_sources)'], 'include_dirs': ['<@(ots_include_dirs)'],\n 'direct_dependent_settings': {'include_dirs': ['<@(ots_include_dirs)']},\n 'dependencies': ['../zlib/zlib.gyp:zlib']}]}\n", "step-3": "# Copyright (c) 2009 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n{\n 'variables': {\n 'chromium_code': 1,\n },\n 'includes': [\n 'ots-common.gypi',\n ],\n 'targets': [\n {\n 'target_name': 'ots',\n 'type': '<(library)',\n 'sources': [\n '<@(ots_sources)',\n ],\n 'include_dirs': [\n '<@(ots_include_dirs)',\n ],\n 'direct_dependent_settings': {\n 'include_dirs': [\n '<@(ots_include_dirs)',\n ],\n },\n 'dependencies': [\n '../zlib/zlib.gyp:zlib',\n ],\n },\n ],\n}\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django.urls import path from player.views import ( MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView, NextSongView, PreviousSongView ) urlpatterns = [ path('list/', MusicListView, name="music_list"), path('play/<str:name>/', MusicPlayView, name="play_music"), path('pause/', MusicPauseView, name="pause_music"), path('unpause/', MusicUnPauseView, name="unpause_music"), path('nextsong/', NextSongView, name="next_song"), path('prevsong/', PreviousSongView, name="previous_song"), ]
normal
{ "blob_id": "f23b002ec0eefa376890e255b1ac0137e3a1c989", "index": 5338, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('list/', MusicListView, name='music_list'), path(\n 'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',\n MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,\n name='unpause_music'), path('nextsong/', NextSongView, name='next_song'\n ), path('prevsong/', PreviousSongView, name='previous_song')]\n", "step-3": "from django.urls import path\nfrom player.views import MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView, NextSongView, PreviousSongView\nurlpatterns = [path('list/', MusicListView, name='music_list'), path(\n 'play/<str:name>/', MusicPlayView, name='play_music'), path('pause/',\n MusicPauseView, name='pause_music'), path('unpause/', MusicUnPauseView,\n name='unpause_music'), path('nextsong/', NextSongView, name='next_song'\n ), path('prevsong/', PreviousSongView, name='previous_song')]\n", "step-4": "from django.urls import path\n\nfrom player.views import (\n MusicListView, MusicPlayView, MusicPauseView, MusicUnPauseView,\n NextSongView, PreviousSongView\n)\n\nurlpatterns = [\n path('list/', MusicListView, name=\"music_list\"),\n path('play/<str:name>/', MusicPlayView, name=\"play_music\"),\n path('pause/', MusicPauseView, name=\"pause_music\"),\n path('unpause/', MusicUnPauseView, name=\"unpause_music\"),\n path('nextsong/', NextSongView, name=\"next_song\"),\n path('prevsong/', PreviousSongView, name=\"previous_song\"),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> from viewController import * from navigationController import * from noticer import * from Images import * from fancyButton import * from constants import * from textObject import * from UIButton import * from UIView import * from UIAlertView import * <|reserved_special_token_1|> # This file imports all files for this module for easy inclusions around the game. from viewController import * from navigationController import * from noticer import * from Images import * from fancyButton import * from constants import * from textObject import * from UIButton import * # from spriteFromRect import * from UIView import * from UIAlertView import *
flexible
{ "blob_id": "7168a8eb401478aa26ee9033262bb5c8fe33f186", "index": 7011, "step-1": "<mask token>\n", "step-2": "from viewController import *\nfrom navigationController import *\nfrom noticer import *\nfrom Images import *\nfrom fancyButton import *\nfrom constants import *\nfrom textObject import *\nfrom UIButton import *\nfrom UIView import *\nfrom UIAlertView import *\n", "step-3": "\n# This file imports all files for this module for easy inclusions around the game.\n\n\nfrom viewController import *\nfrom navigationController import *\nfrom noticer import *\nfrom Images import *\nfrom fancyButton import *\nfrom constants import *\nfrom textObject import *\nfrom UIButton import *\n# from spriteFromRect import *\nfrom UIView import *\nfrom UIAlertView import *", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @login_required def todo(request): eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True ).order_by('Strain', 'Background', 'Rack', 'Cage') genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude( Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack', 'Cage') wean = datetime.date.today() - datetime.timedelta(days=30) wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None, Alive=True).exclude(Strain=2).order_by('Strain', 'Background', 'Rack', 'Cage') return render_to_response('todo.html', {'eartag_list': eartag_list, 'wean_list': wean_list, 'genotype_list': genotype_list}, context_instance=RequestContext(request)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @login_required def todo(request): eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True ).order_by('Strain', 'Background', 'Rack', 'Cage') genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude( Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack', 'Cage') wean = datetime.date.today() - datetime.timedelta(days=30) wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None, Alive=True).exclude(Strain=2).order_by('Strain', 'Background', 'Rack', 'Cage') return render_to_response('todo.html', {'eartag_list': eartag_list, 'wean_list': wean_list, 'genotype_list': genotype_list}, context_instance=RequestContext(request)) @login_required def home(request): cursor = connection.cursor() cage_list = Animal.objects.values('Cage') cage_list_current = Animal.objects.filter(Alive=True).values('Cage') animal_list = Animal.objects.all() animal_list_current = Animal.objects.filter(Alive=True) strain_list = Strain.objects.all() strain_list_current = Strain.objects.filter(animal__Alive=True) return render_to_response('home.html', {'animal_list': animal_list, 'animal_list_current': animal_list_current, 'strain_list': strain_list, 'strain_list_current': strain_list_current, 'cage_list': cage_list, 'cage_list_current': cage_list_current}, context_instance=RequestContext(request)) <|reserved_special_token_1|> from django.shortcuts import render_to_response from mousedb.animal.models import Animal, Strain from django.contrib.auth.decorators import login_required from django.template import RequestContext from django.db import connection import datetime @login_required def todo(request): eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True ).order_by('Strain', 'Background', 'Rack', 'Cage') genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude( Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack', 'Cage') wean = datetime.date.today() - datetime.timedelta(days=30) wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None, Alive=True).exclude(Strain=2).order_by('Strain', 'Background', 'Rack', 'Cage') return render_to_response('todo.html', {'eartag_list': eartag_list, 'wean_list': wean_list, 'genotype_list': genotype_list}, context_instance=RequestContext(request)) @login_required def home(request): cursor = connection.cursor() cage_list = Animal.objects.values('Cage') cage_list_current = Animal.objects.filter(Alive=True).values('Cage') animal_list = Animal.objects.all() animal_list_current = Animal.objects.filter(Alive=True) strain_list = Strain.objects.all() strain_list_current = Strain.objects.filter(animal__Alive=True) return render_to_response('home.html', 
{'animal_list': animal_list, 'animal_list_current': animal_list_current, 'strain_list': strain_list, 'strain_list_current': strain_list_current, 'cage_list': cage_list, 'cage_list_current': cage_list_current}, context_instance=RequestContext(request)) <|reserved_special_token_1|> from django.shortcuts import render_to_response from mousedb.animal.models import Animal, Strain from django.contrib.auth.decorators import login_required from django.template import RequestContext from django.db import connection import datetime @login_required def todo(request): eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True).order_by('Strain','Background','Rack','Cage') genotype_list = Animal.objects.filter(Genotype="N.D.", Alive=True).exclude(Strain__Strain="C57BL/6").order_by('Strain','Background','Rack','Cage') wean = datetime.date.today() - datetime.timedelta(days=30) wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage') return render_to_response('todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list},context_instance=RequestContext(request)) @login_required def home(request): cursor = connection.cursor() cage_list = Animal.objects.values("Cage") cage_list_current = Animal.objects.filter(Alive=True).values("Cage") animal_list = Animal.objects.all() animal_list_current = Animal.objects.filter(Alive=True) strain_list = Strain.objects.all() strain_list_current = Strain.objects.filter(animal__Alive=True) return render_to_response('home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current},context_instance=RequestContext(request))
flexible
{ "blob_id": "89518f43934710ef2e7471a91128e20d2306d6f6", "index": 9291, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef home(request):\n cursor = connection.cursor()\n cage_list = Animal.objects.values('Cage')\n cage_list_current = Animal.objects.filter(Alive=True).values('Cage')\n animal_list = Animal.objects.all()\n animal_list_current = Animal.objects.filter(Alive=True)\n strain_list = Strain.objects.all()\n strain_list_current = Strain.objects.filter(animal__Alive=True)\n return render_to_response('home.html', {'animal_list': animal_list,\n 'animal_list_current': animal_list_current, 'strain_list':\n strain_list, 'strain_list_current': strain_list_current,\n 'cage_list': cage_list, 'cage_list_current': cage_list_current},\n context_instance=RequestContext(request))\n", "step-4": "from django.shortcuts import render_to_response\nfrom mousedb.animal.models import Animal, Strain\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.db import connection\nimport datetime\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef home(request):\n cursor = connection.cursor()\n cage_list = Animal.objects.values('Cage')\n cage_list_current = Animal.objects.filter(Alive=True).values('Cage')\n animal_list = Animal.objects.all()\n animal_list_current = Animal.objects.filter(Alive=True)\n strain_list = 
Strain.objects.all()\n strain_list_current = Strain.objects.filter(animal__Alive=True)\n return render_to_response('home.html', {'animal_list': animal_list,\n 'animal_list_current': animal_list_current, 'strain_list':\n strain_list, 'strain_list_current': strain_list_current,\n 'cage_list': cage_list, 'cage_list_current': cage_list_current},\n context_instance=RequestContext(request))\n", "step-5": "from django.shortcuts import render_to_response\nfrom mousedb.animal.models import Animal, Strain\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.db import connection\nimport datetime\n\n@login_required\ndef todo(request):\n\teartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True).order_by('Strain','Background','Rack','Cage')\n\tgenotype_list = Animal.objects.filter(Genotype=\"N.D.\", Alive=True).exclude(Strain__Strain=\"C57BL/6\").order_by('Strain','Background','Rack','Cage')\n\twean = datetime.date.today() - datetime.timedelta(days=30)\n\twean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')\n\treturn render_to_response('todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list},context_instance=RequestContext(request))\n\n@login_required\ndef home(request):\n\tcursor = connection.cursor()\n\tcage_list = Animal.objects.values(\"Cage\")\n\tcage_list_current = Animal.objects.filter(Alive=True).values(\"Cage\")\n\tanimal_list = Animal.objects.all()\n\tanimal_list_current = Animal.objects.filter(Alive=True)\n\tstrain_list = Strain.objects.all()\n\tstrain_list_current = Strain.objects.filter(animal__Alive=True)\n\treturn render_to_response('home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current},context_instance=RequestContext(request))\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def pig_it(text): return ' '.join(letter if letter == '!' or letter == '?' else letter[1: ] + letter[0] + 'ay' for letter in text.split(' ')) <|reserved_special_token_1|> #Simple Pig Latin def pig_it(text): return " ".join( letter if letter == "!" or letter == "?" else (letter[1:] + letter[0] + "ay") for letter in text.split(" "))
flexible
{ "blob_id": "25641b3a9919db1f172fca22acf413062505de1b", "index": 6894, "step-1": "<mask token>\n", "step-2": "def pig_it(text):\n return ' '.join(letter if letter == '!' or letter == '?' else letter[1:\n ] + letter[0] + 'ay' for letter in text.split(' '))\n", "step-3": "#Simple Pig Latin\ndef pig_it(text):\n return \" \".join( letter if letter == \"!\" or letter == \"?\" else (letter[1:] + letter[0] + \"ay\") for letter in text.split(\" \"))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> class BaseDBMgr: def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), page: int=1, per_page: int=10) ->dict: """获取分页数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int page 页码 @param int per_page 每页数据数量 @return dict """ res = {'page': {'current_page': page, 'per_page': per_page, 'total_page': 0, 'count': 0}, 'items': []} query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) res['page']['count'] = query.count() res['page']['total_page'] = math.ceil(res['page']['count'] / per_page) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) data = query.offset((page - 1) * per_page).limit(per_page) if not field: res['items'] = [item.to_dict() for item in data] else: res['items'] = [item.to_dict(only=field) for item in data] return res <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def delete(self, cls_: BaseMixin, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): items = query.filter(cls_.deleted_at == 0).all() for item in items: item.delete() affect_rows = len(items) else: affect_rows = query.filter(*filters).delete(synchronize_session =False) db.commit() return affect_rows <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseDBMgr: def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), page: int=1, per_page: int=10) ->dict: """获取分页数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int page 页码 @param int per_page 每页数据数量 @return dict """ res = {'page': {'current_page': page, 'per_page': per_page, 'total_page': 0, 'count': 0}, 'items': []} query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) res['page']['count'] = query.count() res['page']['total_page'] = math.ceil(res['page']['count'] / per_page) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) data = query.offset((page - 1) * per_page).limit(per_page) if not field: res['items'] = [item.to_dict() for item in data] else: res['items'] = [item.to_dict(only=field) for item in data] return res def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), limit: int=0) ->list: """获取所有满足条件的数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int limit 取数据最大数量 @return list """ query = db.query(cls_) if filters: query = query.filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) if limit != 0: query = query.limit(limit) query = query.all() if not field: items = [item.to_dict() for item in items] else: items = [item.to_dict(only=field) for item in items] return items def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list( ), field: tuple=()) ->dict: """获取所有满足条件的第一条数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str 
order 排序 @param tuple field 返回字段 @return dict """ items = self.get_all(cls_, filters, orders, field, limit=1) return items[0] if items else None def add(self, cls_: BaseMixin, data: dict) ->int: """插入一条数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @return int 插入数据的主键 """ item = cls_(**data) db.add(item) db.flush() return item.id def update(self, cls_: BaseMixin, data: dict, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) return query.update(data, synchronize_session=False) def delete(self, cls_: BaseMixin, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): items = query.filter(cls_.deleted_at == 0).all() for item in items: item.delete() affect_rows = len(items) else: affect_rows = query.filter(*filters).delete(synchronize_session =False) db.commit() return affect_rows <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseDBMgr: def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), page: int=1, per_page: int=10) ->dict: """获取分页数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int page 页码 @param int per_page 每页数据数量 @return dict """ res = {'page': {'current_page': page, 'per_page': per_page, 'total_page': 0, 'count': 0}, 'items': []} query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) res['page']['count'] = query.count() res['page']['total_page'] = math.ceil(res['page']['count'] / per_page) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) data = query.offset((page - 1) * per_page).limit(per_page) if not field: res['items'] = [item.to_dict() for item in data] else: res['items'] = [item.to_dict(only=field) for item in data] return res def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), limit: int=0) ->list: """获取所有满足条件的数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int limit 取数据最大数量 @return list """ query = db.query(cls_) if filters: query = query.filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) if limit != 0: query = query.limit(limit) query = query.all() if not field: items = [item.to_dict() for item in items] else: items = [item.to_dict(only=field) for item in items] return items def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list( ), field: tuple=()) ->dict: """获取所有满足条件的第一条数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @return dict """ items = self.get_all(cls_, filters, orders, field, limit=1) return items[0] if items else None def add(self, cls_: BaseMixin, data: dict) ->int: """插入一条数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @return int 插入数据的主键 """ item = cls_(**data) db.add(item) db.flush() return item.id def update(self, cls_: BaseMixin, data: dict, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param 
dict data 数据 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) return query.update(data, synchronize_session=False) def delete(self, cls_: BaseMixin, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): items = query.filter(cls_.deleted_at == 0).all() for item in items: item.delete() affect_rows = len(items) else: affect_rows = query.filter(*filters).delete(synchronize_session =False) db.commit() return affect_rows def count(self, cls_: BaseMixin, filters: set, field=None) ->int: """获取满足条件的总行数 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @param string|None field 统计的字段 @return int """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) if field is None: return query.count() else: return query.count(field) <|reserved_special_token_1|> <|reserved_special_token_0|> Orders = List[Set(str, Union(str, int, decimal.Decimal))] class BaseDBMgr: def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), page: int=1, per_page: int=10) ->dict: """获取分页数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int page 页码 @param int per_page 每页数据数量 @return dict """ res = {'page': {'current_page': page, 'per_page': per_page, 'total_page': 0, 'count': 0}, 'items': []} query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) res['page']['count'] = query.count() res['page']['total_page'] = math.ceil(res['page']['count'] / per_page) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) data = query.offset((page - 1) * per_page).limit(per_page) if not field: res['items'] = [item.to_dict() for item in data] else: res['items'] = [item.to_dict(only=field) for item in data] return res def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(), field: tuple=(), limit: int=0) ->list: """获取所有满足条件的数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int limit 取数据最大数量 @return list """ query = db.query(cls_) if filters: query = query.filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) if limit != 0: query = query.limit(limit) query = query.all() if not field: items = [item.to_dict() for item in items] else: items = [item.to_dict(only=field) for item in items] return items def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list( ), field: tuple=()) ->dict: """获取所有满足条件的第一条数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @return dict """ items = self.get_all(cls_, filters, orders, field, limit=1) return items[0] if items else None def add(self, cls_: BaseMixin, data: dict) ->int: """插入一条数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @return int 插入数据的主键 """ item = cls_(**data) db.add(item) db.flush() return item.id def update(self, cls_: BaseMixin, data: dict, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @param set filters 过滤条件 @return int 影响的行数 
""" query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) return query.update(data, synchronize_session=False) def delete(self, cls_: BaseMixin, filters: set) ->int: """更新数据 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @return int 影响的行数 """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): items = query.filter(cls_.deleted_at == 0).all() for item in items: item.delete() affect_rows = len(items) else: affect_rows = query.filter(*filters).delete(synchronize_session =False) db.commit() return affect_rows def count(self, cls_: BaseMixin, filters: set, field=None) ->int: """获取满足条件的总行数 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @param string|None field 统计的字段 @return int """ query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at == 0) if field is None: return query.count() else: return query.count(field) <|reserved_special_token_1|> import math import decimal from typing import Union, List, Set from sqlalchemy import text from .model import BaseMixin from ..core.db import db Orders = List[Set(str, Union(str, int, decimal.Decimal))] class BaseDBMgr: def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict: '''获取分页数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int page 页码 @param int per_page 每页数据数量 @return dict ''' res = { 'page': { 'current_page': page, 'per_page': per_page, 'total_page': 0, 'count': 0, }, 'items': [] } query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at==0) res['page']['count'] = query.count() res['page']['total_page'] = math.ceil(res['page']['count'] / per_page) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) data = query.offset((page-1)*per_page).limit(per_page) if not field: res['items'] = [item.to_dict() for item in data] else: res['items'] = [item.to_dict(only=field) for item in data] return res def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list: '''获取所有满足条件的数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @param int limit 取数据最大数量 @return list ''' query = db.query(cls_) if filters: query = query.filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at==0) for order in orders: field, sort = order sort = 'desc' if sort not in ['asc', 'desc'] else sort query = query.order_by(text(f'{field} {sort}')) if limit != 0: query = query.limit(limit) query = query.all() if not field: items = [item.to_dict() for item in items] else: items = [item.to_dict(only=field) for item in items] return items def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict: '''获取所有满足条件的第一条数据 @param BaseMixin cls 数据库模型实体类 @param set filters 查询条件 @param str order 排序 @param tuple field 返回字段 @return dict ''' items = self.get_all(cls_, filters, orders, field, limit=1) return items[0] if items else None def add(self, cls_:BaseMixin, data:dict)->int: '''插入一条数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @return int 插入数据的主键 ''' item = cls_(**data) db.add(item) db.flush() return item.id def update(self, cls_:BaseMixin, data:dict, filters:set)->int: '''更新数据 @param BaseMixin cls 数据库模型实体类 @param dict data 数据 @param set 
filters 过滤条件 @return int 影响的行数 ''' query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at==0) return query.update(data, synchronize_session=False) def delete(self, cls_:BaseMixin, filters:set)->int: '''更新数据 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @return int 影响的行数 ''' query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): items = query.filter(cls_.deleted_at==0).all() for item in items: item.delete() affect_rows = len(items) else: affect_rows = query.filter(*filters).delete(synchronize_session=False) db.commit() return affect_rows def count(self, cls_:BaseMixin, filters:set, field=None)->int: '''获取满足条件的总行数 @param BaseMixin cls 数据库模型实体类 @param set filters 过滤条件 @param string|None field 统计的字段 @return int ''' query = db.query(cls_).filter(*filters) if hasattr(cls_, 'deleted_at'): query = query.filter(cls_.deleted_at==0) if field is None: return query.count() else: return query.count(field)
flexible
{ "blob_id": "2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08", "index": 39, "step-1": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n", "step-2": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item 
in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n", "step-3": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n 
@param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "step-4": "<mask token>\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return 
items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "step-5": "import math\nimport decimal\nfrom typing import Union, List, Set\n\nfrom sqlalchemy import text\n\nfrom .model import BaseMixin\nfrom ..core.db import db\n\n\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:\n '''获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n '''\n res = {\n 'page': {\n 'current_page': page,\n 'per_page': per_page,\n 'total_page': 0,\n 'count': 0,\n },\n 'items': []\n }\n query = db.query(cls_).filter(*filters)\n \n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n data = query.offset((page-1)*per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n \n return res\n\n\n def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:\n '''获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n '''\n query = db.query(cls_)\n \n if filters:\n query = query.filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = 
query.filter(cls_.deleted_at==0)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n if limit != 0:\n query = query.limit(limit)\n \n query = query.all()\n\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n \n return items\n\n\n def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:\n '''获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n '''\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n\n def add(self, cls_:BaseMixin, data:dict)->int:\n '''插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n '''\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n\n def update(self, cls_:BaseMixin, data:dict, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n return query.update(data, synchronize_session=False)\n\n\n def delete(self, cls_:BaseMixin, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at==0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session=False)\n db.commit()\n return affect_rows\n\n\n def count(self, cls_:BaseMixin, filters:set, field=None)->int:\n '''获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n \n if field is None:\n return query.count()\n else:\n return query.count(field)\n", "step-ids": [ 3, 7, 8, 9, 11 ] }
[ 3, 7, 8, 9, 11 ]
import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec

from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface \
    import StorageInterface
from content_management_portal.interactors.presenters. \
    question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor \
    import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor \
    import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor \
    import QuestionDeletionInteractor


class TestQuestionInteractor:

    def test_question_create(self,questiondto):
        user_id=1
        short_title="hello"
        content_type="HTML"
        content="hi"

        storage=create_autospec(StorageInterface)
        presenter=create_autospec(PresenterInterface)

        interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)
        interactor.question_creation(user_id=user_id,short_title=short_title, \
            content_type=content_type, content=content)

        # Assert
        storage.question_creation.assert_called_once_with( \
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content
        )
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self,questiondto):
        user_id=1
        question_id=1
        short_title="hello"
        content_type="HTML"
        content="hi"

        storage=create_autospec(StorageInterface)
        presenter=create_autospec(PresenterInterface)

        interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)
        interactor.question_updation(user_id=user_id,
                                     short_title=short_title,
                                     content_type=content_type,
                                     content=content,
                                     question_id=question_id
                                     )

        # Assert
        storage.question_updation.assert_called_once_with( \
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
            question_id=question_id
        )
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):

        # Arrange
        question_id=1
        storage=create_autospec(StorageInterface)
        interactor = QuestionDeletionInteractor(storage=storage)

        # Act
        interactor.question_deletion(question_id=question_id)

        # Assert
        storage.question_deletion.assert_called_once_with(question_id=question_id)
normal
{ "blob_id": "1c66ccb80383feeee96b3fb492ff63be1a67a796", "index": 5496, "step-1": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n", "step-3": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n", "step-4": "import pytest\nfrom 
django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface import StorageInterface\nfrom content_management_portal.interactors.presenters.question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n", "step-5": "import pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\n\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface \\\n import StorageInterface\nfrom content_management_portal.interactors.presenters. 
\\\n question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor \\\n import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor \\\n import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor \\\n import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self,questiondto):\n user_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)\n interactor.question_creation(user_id=user_id,short_title=short_title, \\\n content_type=content_type, content=content)\n\n # Assert\n storage.question_creation.assert_called_once_with( \\\n user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self,questiondto):\n user_id=1\n question_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)\n interactor.question_updation(user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n\n\n # Assert\n storage.question_updation.assert_called_once_with( \\\n user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n \n def test_question_deletion(self):\n\n # Arrange\n question_id=1\n storage=create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n \n # Act\n interactor.question_deletion(question_id=question_id)\n \n # Assert\n storage.question_deletion.assert_called_once_with(question_id=question_id)\n \n \n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# difference between size an shape of an image

import cv2

img = cv2.imread('police.jpg')
print img.size # byte size; slightly larger than the file size
print img.shape # y,x or rows, cols

cv2.imshow("My Picture", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
normal
{ "blob_id": "ba42c6af53329035f7ab72f3f1ac87cd90d9dc7f", "index": 9408, "step-1": "# difference between size an shape of an image\r\n\r\nimport cv2\r\n\r\nimg = cv2.imread('police.jpg')\r\nprint img.size # byte size; slightly larger than the file size\r\nprint img.shape # y,x or rows, cols\r\n\r\ncv2.imshow(\"My Picture\", img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import cPickle as pickle

sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc


client = pymongo.MongoClient()
db = client['condor_2014-09-14']
collection = db["condor_classifications"]
collection2 = db["condor_subjects"]

subjects = []
users = []
classifications = []


with open("/home/greg/Databases/condor_ibcc.py","wb") as f:
    f.write("import numpy as np\n")
    f.write("scores = np.array([0,1])\n")
    f.write("nScores = len(scores)\n")
    f.write("nClasses = 2\n")
    f.write("inputFile = \"/home/greg/Databases/condor_ibcc.csv\"\n")
    f.write("outputFile = \"/home/greg/Databases/condor_ibcc.out\"\n")
    f.write("confMatFile = \"/home/greg/Databases/condor_ibcc.mat\"\n")
    f.write("nu0 = np.array([30,70])\n")
    f.write("alpha0 = np.array([[3, 1], [1,3]])\n")

import datetime
i = 0
errorCount = 0
for r in collection.find({"$and": [{"tutorial": False},{"subjects" : {"$elemMatch" : {"zooniverse_id" : {"$exists" : True}}}}]}):
    try:
        user_name = r["user_name"]
    except KeyError:
        continue

    subject_id = r["subjects"][0]["zooniverse_id"]

    if not(user_name in users):
        users.append(user_name)
    if not(subject_id in subjects):
        subjects.append(subject_id)

    user_index = users.index(user_name)
    subject_index = subjects.index(subject_id)

    if ("marks" in r["annotations"][-1]):
        blank = 1
        for markings in r["annotations"][-1]["marks"].values():
            try:
                if markings["animal"] in ["condor","raven","goldenEagle","coyote","turkeyVulture"]:
                    blank = 0
                    break
                elif markings["animal"] in ["carcassOrScale"]:
                    continue
                else:
                    errorCount += 1
            except KeyError:
                errorCount += 1
    else:
        blank = 1

    i += 1
    #if i == 1000:
    #    break
    if (i % 5000) == 0:
        print i
    classifications.append((user_index,subject_index,blank))

print "====----"
print errorCount

try:
    os.remove("/home/greg/Databases/condor_ibcc.out")
except OSError:
    pass

try:
    os.remove("/home/greg/Databases/condor_ibcc.mat")
except OSError:
    pass

try:
    os.remove("/home/greg/Databases/condor_ibcc.csv.dat")
except OSError:
    pass

with open("/home/greg/Databases/condor_ibcc.csv","wb") as f:
    f.write("a,b,c\n")

    for u, s, b in classifications:
        f.write(str(u)+","+str(s)+","+str(b)+"\n")


print datetime.datetime.time(datetime.datetime.now())
ibcc.runIbcc("/home/greg/Databases/condor_ibcc.py")
print datetime.datetime.time(datetime.datetime.now())

pickle.dump(subjects,open("/home/greg/Databases/condor_ibcc.pickle","wb"))
normal
{ "blob_id": "c025fccad9d37dff4db3a10455cbe7d92917d8f6", "index": 6341, "step-1": "#!/usr/bin/env python\n__author__ = 'greghines'\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport sys\nimport os\nimport pymongo\nimport matplotlib.cbook as cbook\nimport cPickle as pickle\n\nsys.path.append(\"/home/greg/github/pyIBCC/python\")\nimport ibcc\n\n\n\nclient = pymongo.MongoClient()\ndb = client['condor_2014-09-14']\ncollection = db[\"condor_classifications\"]\ncollection2 = db[\"condor_subjects\"]\n\nsubjects = []\nusers = []\nclassifications = []\n\n\nwith open(\"/home/greg/Databases/condor_ibcc.py\",\"wb\") as f:\n f.write(\"import numpy as np\\n\")\n f.write(\"scores = np.array([0,1])\\n\")\n f.write(\"nScores = len(scores)\\n\")\n f.write(\"nClasses = 2\\n\")\n f.write(\"inputFile = \\\"/home/greg/Databases/condor_ibcc.csv\\\"\\n\")\n f.write(\"outputFile = \\\"/home/greg/Databases/condor_ibcc.out\\\"\\n\")\n f.write(\"confMatFile = \\\"/home/greg/Databases/condor_ibcc.mat\\\"\\n\")\n f.write(\"nu0 = np.array([30,70])\\n\")\n f.write(\"alpha0 = np.array([[3, 1], [1,3]])\\n\")\n\nimport datetime\ni = 0\nerrorCount = 0\nfor r in collection.find({\"$and\": [{\"tutorial\": False},{\"subjects\" : {\"$elemMatch\" : {\"zooniverse_id\" : {\"$exists\" : True}}}}]}):\n try:\n user_name = r[\"user_name\"]\n except KeyError:\n continue\n\n subject_id = r[\"subjects\"][0][\"zooniverse_id\"]\n\n\n\n if not(user_name in users):\n users.append(user_name)\n if not(subject_id in subjects):\n subjects.append(subject_id)\n\n user_index = users.index(user_name)\n subject_index = subjects.index(subject_id)\n\n\n\n if (\"marks\" in r[\"annotations\"][-1]):\n blank = 1\n for markings in r[\"annotations\"][-1][\"marks\"].values():\n try:\n if markings[\"animal\"] in [\"condor\",\"raven\",\"goldenEagle\",\"coyote\",\"turkeyVulture\"]:\n blank = 0\n break\n elif markings[\"animal\"] in [\"carcassOrScale\"]:\n continue\n else:\n errorCount += 1\n except KeyError:\n errorCount += 1\n else:\n blank = 1\n\n i += 1\n #if i == 1000:\n # break\n if (i % 5000) == 0:\n print i\n classifications.append((user_index,subject_index,blank))\n\nprint \"====----\"\nprint errorCount\n\ntry:\n os.remove(\"/home/greg/Databases/condor_ibcc.out\")\nexcept OSError:\n pass\n\ntry:\n os.remove(\"/home/greg/Databases/condor_ibcc.mat\")\nexcept OSError:\n pass\n\ntry:\n os.remove(\"/home/greg/Databases/condor_ibcc.csv.dat\")\nexcept OSError:\n pass\n\nwith open(\"/home/greg/Databases/condor_ibcc.csv\",\"wb\") as f:\n f.write(\"a,b,c\\n\")\n\n for u, s, b in classifications:\n f.write(str(u)+\",\"+str(s)+\",\"+str(b)+\"\\n\")\n\n\nprint datetime.datetime.time(datetime.datetime.now())\nibcc.runIbcc(\"/home/greg/Databases/condor_ibcc.py\")\nprint datetime.datetime.time(datetime.datetime.now())\n\npickle.dump(subjects,open(\"/home/greg/Databases/condor_ibcc.pickle\",\"wb\"))", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|>


class Comment(models.Model):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __str__(self):
        return self.user.username


class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box


class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Post(models.Model):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __str__(self):
        return self.title


class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username


class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box


class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Post(models.Model):
    title = models.CharField(max_length=40)
    content = models.TextField()
    date_published = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    img = models.ImageField(upload_to='post_img', null=True, blank=True)
    like = models.ManyToManyField(User, related_name='like_user', blank=True)
    dislike = models.ManyToManyField(User, related_name='dislike_user',
        blank=True)

    def __str__(self):
        return self.title


class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username


class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box


class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User


class Post(models.Model):
    title = models.CharField(max_length=40)
    content = models.TextField()
    date_published = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    img = models.ImageField(upload_to='post_img', null=True, blank=True)
    like = models.ManyToManyField(User, related_name='like_user', blank=True)
    dislike = models.ManyToManyField(User, related_name='dislike_user',
        blank=True)

    def __str__(self):
        return self.title


class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username


class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box


class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User

# Create your models here.
class Post(models.Model):
    title = models.CharField(max_length=40)
    content = models.TextField()
    date_published = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    img = models.ImageField(upload_to='post_img', null=True, blank=True)
    like = models.ManyToManyField(User, related_name='like_user', blank=True)
    dislike = models.ManyToManyField(User, related_name='dislike_user',blank=True)

    def __str__(self):
        return self.title

class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username

class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box

class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
flexible
{ "blob_id": "1257b90781a213ca8e07f67a33b8e847d0525653", "index": 9354, "step-1": "<mask token>\n\n\nclass Comment(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n", "step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n", "step-3": "<mask token>\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',\n blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n", "step-4": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',\n blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = 
models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n", "step-5": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',blank=True)\n\n def __str__(self):\n return self.title\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n ", "step-ids": [ 7, 10, 11, 12, 13 ] }
[ 7, 10, 11, 12, 13 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def nqueen(depth, n, history):
    global cnt
    if depth == n:
        cnt += 1
    else:
        for i in range(n):
            if i not in history:
                for index, value in enumerate(history):
                    if abs(depth - index) == abs(i - value):
                        break
                else:
                    history.append(i)
                    nqueen(depth + 1, n, history)
                    history.remove(i)


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def nqueen(depth, n, history):
    global cnt
    if depth == n:
        cnt += 1
    else:
        for i in range(n):
            if i not in history:
                for index, value in enumerate(history):
                    if abs(depth - index) == abs(i - value):
                        break
                else:
                    history.append(i)
                    nqueen(depth + 1, n, history)
                    history.remove(i)


for t in range(int(input())):
    cnt = 0
    nqueen(0, int(input()), [])
    print('#{} {}'.format(t + 1, cnt))
<|reserved_special_token_1|>
'''
swea 2806 N-Queen
'''
def nqueen(depth, n, history):
    global cnt
    if depth == n:
        cnt += 1
    else:
        for i in range(n):
            if i not in history:
                for index, value in enumerate(history):
                    if abs(depth - index) == abs(i - value):
                        break
                else:
                    history.append(i)
                    nqueen(depth + 1, n, history)
                    history.remove(i)


for t in range(int(input())):
    cnt = 0
    nqueen(0, int(input()), [])
    print("#{} {}".format(t+1, cnt))
flexible
{ "blob_id": "b35686f7feec2c4a905007f3c105b6fa05b87297", "index": 5365, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef nqueen(depth, n, history):\n global cnt\n if depth == n:\n cnt += 1\n else:\n for i in range(n):\n if i not in history:\n for index, value in enumerate(history):\n if abs(depth - index) == abs(i - value):\n break\n else:\n history.append(i)\n nqueen(depth + 1, n, history)\n history.remove(i)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef nqueen(depth, n, history):\n global cnt\n if depth == n:\n cnt += 1\n else:\n for i in range(n):\n if i not in history:\n for index, value in enumerate(history):\n if abs(depth - index) == abs(i - value):\n break\n else:\n history.append(i)\n nqueen(depth + 1, n, history)\n history.remove(i)\n\n\nfor t in range(int(input())):\n cnt = 0\n nqueen(0, int(input()), [])\n print('#{} {}'.format(t + 1, cnt))\n", "step-4": "'''\nswea 2806 N-Queen\n'''\ndef nqueen(depth, n, history):\n global cnt\n if depth == n:\n cnt += 1\n else:\n for i in range(n):\n if i not in history:\n for index, value in enumerate(history):\n if abs(depth - index) == abs(i - value):\n break\n else:\n history.append(i)\n nqueen(depth + 1, n, history)\n history.remove(i)\n\n\nfor t in range(int(input())):\n cnt = 0\n nqueen(0, int(input()), [])\n print(\"#{} {}\".format(t+1, cnt))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> def get_user_by_id(id): if id is None: return user = User.query.filter(User.alias_id == id).first() return user <|reserved_special_token_0|> def update_user(first_name, last_name): user = flask_login.current_user user.first_name = first_name user.last_name = last_name db.session.merge(user) db.session.commit() return True def verify_email(user): token = user.generate_validate_token() url = current_app.config['APP_URL'] + '/auth/verify/' + token subject = '[Togger] Welcome to Togger. Verify your email' prepare_email(user.username, subject, url) def restore_password(token, new_password): user = User() if user.check_password_token(token): user = get_user(user.username) user.set_password(new_password) db.session.merge(user) db.session.commit() flask_login.login_user(user, remember=True) return True else: flash('Restoration link got expired. Please request a new one.', 'danger') return False def password_email(username): user = get_user(username) if user and user.is_verified: token = user.generate_password_token() url = current_app.config['APP_URL'] + '/auth/restore/' + token subject = ( '[Togger] Forgot your password? The restoration link is inside') prepare_email(user.username, subject, url) <|reserved_special_token_0|> def send_email(username, subject, content, config): msg = EmailMessage() msg.set_content(content) msg['Subject'] = subject msg['From'] = config['SMTP_MAILBOX'] msg['To'] = username s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD']) s.send_message(msg) s.quit() def confirm_verify_email(token): user = User() if user.check_validate_token(token): user = get_user(user.username) user.is_verified = True db.session.merge(user) db.session.commit() else: flash('Verification link got expired. Please request a new one.', 'danger') def change_password(old_password, new_password): if flask_login.current_user.check_password(old_password): flask_login.current_user.set_password(new_password) db.session.merge(flask_login.current_user) db.session.commit() flash('Password was changed. Please sign in using new password.', 'success') return True flash('Current password is incorrect.', 'danger') return False def get_roles(): try: return flask_login.current_user.roles except AttributeError: return [] def get_role(): for role in get_roles(): if role.is_default: return role return None def has_role(role_type): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): role = get_role() if role and role.type >= role_type: result = function(*args, **kwargs) else: result = current_app.login_manager.unauthorized() return result return wrapper return decorator <|reserved_special_token_1|> <|reserved_special_token_0|> def get_user(username): if username is None: return user = User.query.filter(User.username == username).first() return user def get_user_by_id(id): if id is None: return user = User.query.filter(User.alias_id == id).first() return user <|reserved_special_token_0|> def update_user(first_name, last_name): user = flask_login.current_user user.first_name = first_name user.last_name = last_name db.session.merge(user) db.session.commit() return True def verify_email(user): token = user.generate_validate_token() url = current_app.config['APP_URL'] + '/auth/verify/' + token subject = '[Togger] Welcome to Togger. 
Verify your email' prepare_email(user.username, subject, url) def restore_password(token, new_password): user = User() if user.check_password_token(token): user = get_user(user.username) user.set_password(new_password) db.session.merge(user) db.session.commit() flask_login.login_user(user, remember=True) return True else: flash('Restoration link got expired. Please request a new one.', 'danger') return False def password_email(username): user = get_user(username) if user and user.is_verified: token = user.generate_password_token() url = current_app.config['APP_URL'] + '/auth/restore/' + token subject = ( '[Togger] Forgot your password? The restoration link is inside') prepare_email(user.username, subject, url) <|reserved_special_token_0|> def send_email(username, subject, content, config): msg = EmailMessage() msg.set_content(content) msg['Subject'] = subject msg['From'] = config['SMTP_MAILBOX'] msg['To'] = username s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD']) s.send_message(msg) s.quit() def confirm_verify_email(token): user = User() if user.check_validate_token(token): user = get_user(user.username) user.is_verified = True db.session.merge(user) db.session.commit() else: flash('Verification link got expired. Please request a new one.', 'danger') def change_password(old_password, new_password): if flask_login.current_user.check_password(old_password): flask_login.current_user.set_password(new_password) db.session.merge(flask_login.current_user) db.session.commit() flash('Password was changed. Please sign in using new password.', 'success') return True flash('Current password is incorrect.', 'danger') return False def get_roles(): try: return flask_login.current_user.roles except AttributeError: return [] def get_role(): for role in get_roles(): if role.is_default: return role return None def has_role(role_type): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): role = get_role() if role and role.type >= role_type: result = function(*args, **kwargs) else: result = current_app.login_manager.unauthorized() return result return wrapper return decorator <|reserved_special_token_1|> <|reserved_special_token_0|> def get_user(username): if username is None: return user = User.query.filter(User.username == username).first() return user def get_user_by_id(id): if id is None: return user = User.query.filter(User.alias_id == id).first() return user def add_user(username, password, first_name, last_name): if username is None or password is None: return calendar = Calendar(name=username) role = Role(type=Role.OWNER, calendar=calendar, is_default=True) user = User(username=username, first_name=first_name, last_name= last_name, roles=[role]) user.set_password(password) verify_email(user) db.session.add(user) db.session.commit() return user def update_user(first_name, last_name): user = flask_login.current_user user.first_name = first_name user.last_name = last_name db.session.merge(user) db.session.commit() return True def verify_email(user): token = user.generate_validate_token() url = current_app.config['APP_URL'] + '/auth/verify/' + token subject = '[Togger] Welcome to Togger. 
Verify your email' prepare_email(user.username, subject, url) def restore_password(token, new_password): user = User() if user.check_password_token(token): user = get_user(user.username) user.set_password(new_password) db.session.merge(user) db.session.commit() flask_login.login_user(user, remember=True) return True else: flash('Restoration link got expired. Please request a new one.', 'danger') return False def password_email(username): user = get_user(username) if user and user.is_verified: token = user.generate_password_token() url = current_app.config['APP_URL'] + '/auth/restore/' + token subject = ( '[Togger] Forgot your password? The restoration link is inside') prepare_email(user.username, subject, url) <|reserved_special_token_0|> def send_email(username, subject, content, config): msg = EmailMessage() msg.set_content(content) msg['Subject'] = subject msg['From'] = config['SMTP_MAILBOX'] msg['To'] = username s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD']) s.send_message(msg) s.quit() def confirm_verify_email(token): user = User() if user.check_validate_token(token): user = get_user(user.username) user.is_verified = True db.session.merge(user) db.session.commit() else: flash('Verification link got expired. Please request a new one.', 'danger') def change_password(old_password, new_password): if flask_login.current_user.check_password(old_password): flask_login.current_user.set_password(new_password) db.session.merge(flask_login.current_user) db.session.commit() flash('Password was changed. Please sign in using new password.', 'success') return True flash('Current password is incorrect.', 'danger') return False def get_roles(): try: return flask_login.current_user.roles except AttributeError: return [] def get_role(): for role in get_roles(): if role.is_default: return role return None def has_role(role_type): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): role = get_role() if role and role.type >= role_type: result = function(*args, **kwargs) else: result = current_app.login_manager.unauthorized() return result return wrapper return decorator <|reserved_special_token_1|> import smtplib from email.message import EmailMessage from functools import wraps from threading import Thread import flask_login from flask import flash, current_app from togger import db from togger.auth.models import User, Role from togger.calendar.models import Calendar def get_user(username): if username is None: return user = User.query.filter(User.username == username).first() return user def get_user_by_id(id): if id is None: return user = User.query.filter(User.alias_id == id).first() return user def add_user(username, password, first_name, last_name): if username is None or password is None: return calendar = Calendar(name=username) role = Role(type=Role.OWNER, calendar=calendar, is_default=True) user = User(username=username, first_name=first_name, last_name= last_name, roles=[role]) user.set_password(password) verify_email(user) db.session.add(user) db.session.commit() return user def update_user(first_name, last_name): user = flask_login.current_user user.first_name = first_name user.last_name = last_name db.session.merge(user) db.session.commit() return True def verify_email(user): token = user.generate_validate_token() url = current_app.config['APP_URL'] + '/auth/verify/' + token subject = '[Togger] Welcome to Togger. 
Verify your email' prepare_email(user.username, subject, url) def restore_password(token, new_password): user = User() if user.check_password_token(token): user = get_user(user.username) user.set_password(new_password) db.session.merge(user) db.session.commit() flask_login.login_user(user, remember=True) return True else: flash('Restoration link got expired. Please request a new one.', 'danger') return False def password_email(username): user = get_user(username) if user and user.is_verified: token = user.generate_password_token() url = current_app.config['APP_URL'] + '/auth/restore/' + token subject = ( '[Togger] Forgot your password? The restoration link is inside') prepare_email(user.username, subject, url) def prepare_email(address, subject, content): thread = Thread(target=send_email, args=(address, subject, content, current_app.config)) thread.daemon = True thread.start() def send_email(username, subject, content, config): msg = EmailMessage() msg.set_content(content) msg['Subject'] = subject msg['From'] = config['SMTP_MAILBOX'] msg['To'] = username s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD']) s.send_message(msg) s.quit() def confirm_verify_email(token): user = User() if user.check_validate_token(token): user = get_user(user.username) user.is_verified = True db.session.merge(user) db.session.commit() else: flash('Verification link got expired. Please request a new one.', 'danger') def change_password(old_password, new_password): if flask_login.current_user.check_password(old_password): flask_login.current_user.set_password(new_password) db.session.merge(flask_login.current_user) db.session.commit() flash('Password was changed. Please sign in using new password.', 'success') return True flash('Current password is incorrect.', 'danger') return False def get_roles(): try: return flask_login.current_user.roles except AttributeError: return [] def get_role(): for role in get_roles(): if role.is_default: return role return None def has_role(role_type): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): role = get_role() if role and role.type >= role_type: result = function(*args, **kwargs) else: result = current_app.login_manager.unauthorized() return result return wrapper return decorator <|reserved_special_token_1|> import smtplib from email.message import EmailMessage from functools import wraps from threading import Thread import flask_login from flask import flash, current_app from togger import db from togger.auth.models import User, Role from togger.calendar.models import Calendar def get_user(username): if username is None: return user = User.query.filter(User.username == username).first() return user def get_user_by_id(id): if id is None: return user = User.query.filter(User.alias_id == id).first() return user def add_user(username, password, first_name, last_name): if username is None or password is None: return calendar = Calendar(name=username) role = Role(type=Role.OWNER, calendar=calendar, is_default=True) user = User(username=username, first_name=first_name, last_name=last_name, roles=[role]) user.set_password(password) verify_email(user) db.session.add(user) db.session.commit() return user def update_user(first_name, last_name): user = flask_login.current_user user.first_name = first_name user.last_name = last_name db.session.merge(user) db.session.commit() return True def verify_email(user): token = user.generate_validate_token() url = current_app.config['APP_URL'] + "/auth/verify/" 
+ token subject = "[Togger] Welcome to Togger. Verify your email" prepare_email(user.username, subject, url) def restore_password(token, new_password): user = User() if user.check_password_token(token): user = get_user(user.username) user.set_password(new_password) db.session.merge(user) db.session.commit() flask_login.login_user(user, remember=True) return True else: flash("Restoration link got expired. Please request a new one.", 'danger') return False def password_email(username): user = get_user(username) if user and user.is_verified: token = user.generate_password_token() url = current_app.config['APP_URL'] + "/auth/restore/" + token subject = "[Togger] Forgot your password? The restoration link is inside" prepare_email(user.username, subject, url) def prepare_email(address, subject, content): thread = Thread(target=send_email, args=(address, subject, content, current_app.config,)) thread.daemon = True thread.start() def send_email(username, subject, content, config): msg = EmailMessage() msg.set_content(content) msg['Subject'] = subject msg['From'] = config['SMTP_MAILBOX'] msg['To'] = username s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT']) s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD']) s.send_message(msg) s.quit() def confirm_verify_email(token): user = User() if user.check_validate_token(token): user = get_user(user.username) user.is_verified = True db.session.merge(user) db.session.commit() else: flash('Verification link got expired. Please request a new one.', 'danger') def change_password(old_password, new_password): if flask_login.current_user.check_password(old_password): flask_login.current_user.set_password(new_password) db.session.merge(flask_login.current_user) db.session.commit() flash('Password was changed. Please sign in using new password.', 'success') return True flash('Current password is incorrect.', 'danger') return False def get_roles(): try: return flask_login.current_user.roles except AttributeError: return [] def get_role(): for role in get_roles(): if role.is_default: return role return None def has_role(role_type): def decorator(function): @wraps(function) def wrapper(*args, **kwargs): role = get_role() if role and role.type >= role_type: result = function(*args, **kwargs) else: result = current_app.login_manager.unauthorized() return result return wrapper return decorator
flexible
{ "blob_id": "fab3e524edf6783775fabf402f9148bf31ac06d6", "index": 2914, "step-1": "<mask token>\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\n<mask token>\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. 
Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n", "step-2": "<mask token>\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\n<mask token>\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. 
Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n", "step-3": "<mask token>\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=\n last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\n<mask token>\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. 
Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n", "step-4": "import smtplib\nfrom email.message import EmailMessage\nfrom functools import wraps\nfrom threading import Thread\nimport flask_login\nfrom flask import flash, current_app\nfrom togger import db\nfrom togger.auth.models import User, Role\nfrom togger.calendar.models import Calendar\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=\n last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + '/auth/verify/' + token\n subject = '[Togger] Welcome to Togger. Verify your email'\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash('Restoration link got expired. Please request a new one.',\n 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + '/auth/restore/' + token\n subject = (\n '[Togger] Forgot your password? 
The restoration link is inside')\n prepare_email(user.username, subject, url)\n\n\ndef prepare_email(address, subject, content):\n thread = Thread(target=send_email, args=(address, subject, content,\n current_app.config))\n thread.daemon = True\n thread.start()\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.',\n 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. Please sign in using new password.',\n 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n\n def decorator(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n", "step-5": "import smtplib\nfrom email.message import EmailMessage\nfrom functools import wraps\nfrom threading import Thread\n\nimport flask_login\nfrom flask import flash, current_app\n\nfrom togger import db\nfrom togger.auth.models import User, Role\nfrom togger.calendar.models import Calendar\n\n\ndef get_user(username):\n if username is None:\n return\n user = User.query.filter(User.username == username).first()\n return user\n\n\ndef get_user_by_id(id):\n if id is None:\n return\n user = User.query.filter(User.alias_id == id).first()\n return user\n\n\ndef add_user(username, password, first_name, last_name):\n if username is None or password is None:\n return\n calendar = Calendar(name=username)\n role = Role(type=Role.OWNER, calendar=calendar, is_default=True)\n user = User(username=username, first_name=first_name, last_name=last_name, roles=[role])\n user.set_password(password)\n verify_email(user)\n db.session.add(user)\n db.session.commit()\n return user\n\n\ndef update_user(first_name, last_name):\n user = flask_login.current_user\n user.first_name = first_name\n user.last_name = last_name\n db.session.merge(user)\n db.session.commit()\n return True\n\n\ndef verify_email(user):\n token = user.generate_validate_token()\n url = current_app.config['APP_URL'] + \"/auth/verify/\" + token\n subject = \"[Togger] Welcome to Togger. 
Verify your email\"\n prepare_email(user.username, subject, url)\n\n\ndef restore_password(token, new_password):\n user = User()\n if user.check_password_token(token):\n user = get_user(user.username)\n user.set_password(new_password)\n db.session.merge(user)\n db.session.commit()\n flask_login.login_user(user, remember=True)\n return True\n else:\n flash(\"Restoration link got expired. Please request a new one.\", 'danger')\n return False\n\n\ndef password_email(username):\n user = get_user(username)\n if user and user.is_verified:\n token = user.generate_password_token()\n url = current_app.config['APP_URL'] + \"/auth/restore/\" + token\n subject = \"[Togger] Forgot your password? The restoration link is inside\"\n prepare_email(user.username, subject, url)\n\n\ndef prepare_email(address, subject, content):\n thread = Thread(target=send_email,\n args=(address, subject, content, current_app.config,))\n thread.daemon = True\n thread.start()\n\n\ndef send_email(username, subject, content, config):\n msg = EmailMessage()\n msg.set_content(content)\n msg['Subject'] = subject\n msg['From'] = config['SMTP_MAILBOX']\n msg['To'] = username\n s = smtplib.SMTP(config['SMTP_SERVER'], config['SMTP_PORT'])\n s.login(config['SMTP_LOGIN'], config['SMTP_PASSWORD'])\n s.send_message(msg)\n s.quit()\n\n\ndef confirm_verify_email(token):\n user = User()\n if user.check_validate_token(token):\n user = get_user(user.username)\n user.is_verified = True\n db.session.merge(user)\n db.session.commit()\n else:\n flash('Verification link got expired. Please request a new one.', 'danger')\n\n\ndef change_password(old_password, new_password):\n if flask_login.current_user.check_password(old_password):\n flask_login.current_user.set_password(new_password)\n db.session.merge(flask_login.current_user)\n db.session.commit()\n flash('Password was changed. Please sign in using new password.', 'success')\n return True\n flash('Current password is incorrect.', 'danger')\n return False\n\n\ndef get_roles():\n try:\n return flask_login.current_user.roles\n except AttributeError:\n return []\n\n\ndef get_role():\n for role in get_roles():\n if role.is_default:\n return role\n return None\n\n\ndef has_role(role_type):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n role = get_role()\n if role and role.type >= role_type:\n result = function(*args, **kwargs)\n else:\n result = current_app.login_manager.unauthorized()\n return result\n return wrapper\n return decorator\n", "step-ids": [ 11, 12, 13, 15, 16 ] }
[ 11, 12, 13, 15, 16 ]
<|reserved_special_token_0|> class Service(InstanceSet): <|reserved_special_token_0|> def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) <|reserved_special_token_0|> def identities(self): return list(self.instances.keys()) <|reserved_special_token_0|> def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> class ServiceInstance(object): <|reserved_special_token_0|> def update(self, **info): version = info.pop('version', None) if version: version = semantic_version.Version(version) self.version = version self.info.update(info) <|reserved_special_token_0|> <|reserved_special_token_0|> @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return 
six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs) <|reserved_special_token_1|> <|reserved_special_token_0|> class ServiceInstance(object): def __init__(self, id=None, identity=None, **info): self.id = id self.identity = identity if identity else hash_id(info.get('endpoint')) self.info = {} self.update(**info) def update(self, **info): version = info.pop('version', None) if version: version = semantic_version.Version(version) self.version = version self.info.update(info) <|reserved_special_token_0|> def serialize(self): d = {'id': self.id, 'identity': self.identity, 'version': serialize_version(self.version)} d.update(self.info) return d @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs) <|reserved_special_token_1|> from __future__ import unicode_literals import abc import logging import six import semantic_version from lymph.utils import observables, hash_id from lymph.core.versioning import 
compatible, serialize_version logger = logging.getLogger(__name__) # Event types propagated by Service when instances change. ADDED = 'ADDED' REMOVED = 'REMOVED' UPDATED = 'UPDATED' class ServiceInstance(object): def __init__(self, id=None, identity=None, **info): self.id = id self.identity = identity if identity else hash_id(info.get('endpoint')) self.info = {} self.update(**info) def update(self, **info): version = info.pop('version', None) if version: version = semantic_version.Version(version) self.version = version self.info.update(info) def __getattr__(self, name): try: return self.info[name] except KeyError: raise AttributeError(name) def serialize(self): d = { 'id': self.id, 'identity': self.identity, 'version': serialize_version(self.version), } d.update(self.info) return d @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs)
flexible
{ "blob_id": "ba41f2a564f46032dbf72f7d17b2ea6deaa81b10", "index": 4332, "step-1": "<mask token>\n\n\nclass Service(InstanceSet):\n <mask token>\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n <mask token>\n\n def identities(self):\n return list(self.instances.keys())\n <mask token>\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n", "step-2": "<mask token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n", "step-3": "<mask token>\n\n\nclass ServiceInstance(object):\n <mask token>\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n <mask token>\n <mask token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n 
def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n", "step-4": "<mask token>\n\n\nclass ServiceInstance(object):\n\n def __init__(self, id=None, identity=None, **info):\n self.id = id\n self.identity = identity if identity else hash_id(info.get('endpoint'))\n self.info = {}\n self.update(**info)\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n <mask token>\n\n def serialize(self):\n d = {'id': self.id, 'identity': self.identity, 'version':\n serialize_version(self.version)}\n d.update(self.info)\n return d\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n 
return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n", "step-5": "from __future__ import unicode_literals\nimport abc\nimport logging\n\nimport six\nimport semantic_version\n\nfrom lymph.utils import observables, hash_id\nfrom lymph.core.versioning import compatible, serialize_version\n\n\nlogger = logging.getLogger(__name__)\n\n# Event types propagated by Service when instances change.\nADDED = 'ADDED'\nREMOVED = 'REMOVED'\nUPDATED = 'UPDATED'\n\n\nclass ServiceInstance(object):\n def __init__(self, id=None, identity=None, **info):\n self.id = id\n self.identity = identity if identity else hash_id(info.get('endpoint'))\n self.info = {}\n self.update(**info)\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n\n def __getattr__(self, name):\n try:\n return self.info[name]\n except KeyError:\n raise AttributeError(name)\n\n def serialize(self):\n d = {\n 'id': self.id,\n 'identity': self.identity,\n 'version': serialize_version(self.version),\n }\n d.update(self.info)\n return d\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n", "step-ids": [ 12, 18, 20, 22, 26 ] }
[ 12, 18, 20, 22, 26 ]
<|reserved_special_token_0|> def tarjan(): timer = time.time start = timer() voldemortResult = authorStore.get('_authors') allAuthors = voldemortResult[0][0] nodes = {} for author in allAuthors.get('content'): nodeKey = str(author) nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False] nodes[nodeKey] = nodeValue for nodeKey in nodes: node = nodes.get(nodeKey) if node[1] == -1: strongconnect(node, nodes) end = timer() for scc in components: print('==> NEUE KOMPONENTE') for node in scc: print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) + ', Name: ' + node[0].get('name')) print('Insgesamt sind es ' + str(len(components)) + ' Komponenten') print('Laufzeit: ' + str(end - start) + ' Sekunden') def strongconnect(node, allNodes): global index node[1] = index node[2] = index index += 1 stack.append(node) node[3] = True for kanteKey in node[0].get('friends'): kanteNode = allNodes.get(str(kanteKey)) if kanteNode[1] == -1: strongconnect(kanteNode, allNodes) node[2] = min(node[2], kanteNode[2]) elif kanteNode[3] == True: node[2] = min(node[2], kanteNode[1]) if node[1] == node[2]: scc = [] prevNode = None while prevNode != node: prevNode = stack.pop() prevNode[3] = False scc.append(prevNode) components.append(scc) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def tarjan(): timer = time.time start = timer() voldemortResult = authorStore.get('_authors') allAuthors = voldemortResult[0][0] nodes = {} for author in allAuthors.get('content'): nodeKey = str(author) nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False] nodes[nodeKey] = nodeValue for nodeKey in nodes: node = nodes.get(nodeKey) if node[1] == -1: strongconnect(node, nodes) end = timer() for scc in components: print('==> NEUE KOMPONENTE') for node in scc: print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) + ', Name: ' + node[0].get('name')) print('Insgesamt sind es ' + str(len(components)) + ' Komponenten') print('Laufzeit: ' + str(end - start) + ' Sekunden') def strongconnect(node, allNodes): global index node[1] = index node[2] = index index += 1 stack.append(node) node[3] = True for kanteKey in node[0].get('friends'): kanteNode = allNodes.get(str(kanteKey)) if kanteNode[1] == -1: strongconnect(kanteNode, allNodes) node[2] = min(node[2], kanteNode[2]) elif kanteNode[3] == True: node[2] = min(node[2], kanteNode[1]) if node[1] == node[2]: scc = [] prevNode = None while prevNode != node: prevNode = stack.pop() prevNode[3] = False scc.append(prevNode) components.append(scc) tarjan() <|reserved_special_token_1|> <|reserved_special_token_0|> authorStore = voldemort.StoreClient('authorStore', [{'0', 6666}]) stack = [] components = [] index = 1 def tarjan(): timer = time.time start = timer() voldemortResult = authorStore.get('_authors') allAuthors = voldemortResult[0][0] nodes = {} for author in allAuthors.get('content'): nodeKey = str(author) nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False] nodes[nodeKey] = nodeValue for nodeKey in nodes: node = nodes.get(nodeKey) if node[1] == -1: strongconnect(node, nodes) end = timer() for scc in components: print('==> NEUE KOMPONENTE') for node in scc: print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) + ', Name: ' + node[0].get('name')) print('Insgesamt sind es ' + str(len(components)) + ' Komponenten') print('Laufzeit: ' + str(end - start) + ' Sekunden') def strongconnect(node, allNodes): global index node[1] = index node[2] = index index += 1 stack.append(node) node[3] = True for kanteKey in node[0].get('friends'): 
kanteNode = allNodes.get(str(kanteKey)) if kanteNode[1] == -1: strongconnect(kanteNode, allNodes) node[2] = min(node[2], kanteNode[2]) elif kanteNode[3] == True: node[2] = min(node[2], kanteNode[1]) if node[1] == node[2]: scc = [] prevNode = None while prevNode != node: prevNode = stack.pop() prevNode[3] = False scc.append(prevNode) components.append(scc) tarjan() <|reserved_special_token_1|> import voldemort import time authorStore = voldemort.StoreClient('authorStore', [{'0', 6666}]) stack = [] components = [] index = 1 def tarjan(): timer = time.time start = timer() voldemortResult = authorStore.get('_authors') allAuthors = voldemortResult[0][0] nodes = {} for author in allAuthors.get('content'): nodeKey = str(author) nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False] nodes[nodeKey] = nodeValue for nodeKey in nodes: node = nodes.get(nodeKey) if node[1] == -1: strongconnect(node, nodes) end = timer() for scc in components: print('==> NEUE KOMPONENTE') for node in scc: print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) + ', Name: ' + node[0].get('name')) print('Insgesamt sind es ' + str(len(components)) + ' Komponenten') print('Laufzeit: ' + str(end - start) + ' Sekunden') def strongconnect(node, allNodes): global index node[1] = index node[2] = index index += 1 stack.append(node) node[3] = True for kanteKey in node[0].get('friends'): kanteNode = allNodes.get(str(kanteKey)) if kanteNode[1] == -1: strongconnect(kanteNode, allNodes) node[2] = min(node[2], kanteNode[2]) elif kanteNode[3] == True: node[2] = min(node[2], kanteNode[1]) if node[1] == node[2]: scc = [] prevNode = None while prevNode != node: prevNode = stack.pop() prevNode[3] = False scc.append(prevNode) components.append(scc) tarjan() <|reserved_special_token_1|> import voldemort import time authorStore = voldemort.StoreClient('authorStore', [{'0', 6666}]) stack = [] components = [] index = 1 # Implementation of the Tarjan algorithm for the detection of strongly connected components. # Function collects all authors in the database and outputs them as strongly connected components. def tarjan(): timer = time.time start = timer() voldemortResult = authorStore.get("_authors") allAuthors = voldemortResult[0][0] nodes = {} for author in allAuthors.get("content"): nodeKey = str(author) nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False] #node = Liste aus den Autorendaten, index(int), lowlink(int), onStack(boolean) nodes[nodeKey] = nodeValue for nodeKey in nodes: node = nodes.get(nodeKey) if node[1] == -1: strongconnect(node, nodes) end = timer() for scc in components: print("==> NEUE KOMPONENTE") for node in scc: print("Index: " + str(node[1]) + ", Lowlink: " + str(node[2]) + ", Name: " + node[0].get('name')) print("Insgesamt sind es " + str(len(components)) + " Komponenten") print("Laufzeit: " + str(end - start) + " Sekunden") # This method connects every node in the graph and builds, if applicable, a strongly connected component out of them. def strongconnect(node, allNodes): global index node[1] = index node[2] = index index += 1 stack.append(node) node[3] = True for kanteKey in node[0].get("friends"): kanteNode = allNodes.get(str(kanteKey)) if kanteNode[1] == -1: strongconnect(kanteNode, allNodes) node[2] = min(node[2], kanteNode[2]) elif kanteNode[3] == True: node[2] = min(node[2], kanteNode[1]) if node[1] == node[2]: scc = [] prevNode = None while prevNode != node: prevNode = stack.pop() prevNode[3] = False scc.append(prevNode) components.append(scc) tarjan()
flexible
{ "blob_id": "bb2c684fd5b962c97c033d4b4c2027d52b7371fd", "index": 499, "step-1": "<mask token>\n\n\ndef tarjan():\n timer = time.time\n start = timer()\n voldemortResult = authorStore.get('_authors')\n allAuthors = voldemortResult[0][0]\n nodes = {}\n for author in allAuthors.get('content'):\n nodeKey = str(author)\n nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]\n nodes[nodeKey] = nodeValue\n for nodeKey in nodes:\n node = nodes.get(nodeKey)\n if node[1] == -1:\n strongconnect(node, nodes)\n end = timer()\n for scc in components:\n print('==> NEUE KOMPONENTE')\n for node in scc:\n print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) +\n ', Name: ' + node[0].get('name'))\n print('Insgesamt sind es ' + str(len(components)) + ' Komponenten')\n print('Laufzeit: ' + str(end - start) + ' Sekunden')\n\n\ndef strongconnect(node, allNodes):\n global index\n node[1] = index\n node[2] = index\n index += 1\n stack.append(node)\n node[3] = True\n for kanteKey in node[0].get('friends'):\n kanteNode = allNodes.get(str(kanteKey))\n if kanteNode[1] == -1:\n strongconnect(kanteNode, allNodes)\n node[2] = min(node[2], kanteNode[2])\n elif kanteNode[3] == True:\n node[2] = min(node[2], kanteNode[1])\n if node[1] == node[2]:\n scc = []\n prevNode = None\n while prevNode != node:\n prevNode = stack.pop()\n prevNode[3] = False\n scc.append(prevNode)\n components.append(scc)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef tarjan():\n timer = time.time\n start = timer()\n voldemortResult = authorStore.get('_authors')\n allAuthors = voldemortResult[0][0]\n nodes = {}\n for author in allAuthors.get('content'):\n nodeKey = str(author)\n nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]\n nodes[nodeKey] = nodeValue\n for nodeKey in nodes:\n node = nodes.get(nodeKey)\n if node[1] == -1:\n strongconnect(node, nodes)\n end = timer()\n for scc in components:\n print('==> NEUE KOMPONENTE')\n for node in scc:\n print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) +\n ', Name: ' + node[0].get('name'))\n print('Insgesamt sind es ' + str(len(components)) + ' Komponenten')\n print('Laufzeit: ' + str(end - start) + ' Sekunden')\n\n\ndef strongconnect(node, allNodes):\n global index\n node[1] = index\n node[2] = index\n index += 1\n stack.append(node)\n node[3] = True\n for kanteKey in node[0].get('friends'):\n kanteNode = allNodes.get(str(kanteKey))\n if kanteNode[1] == -1:\n strongconnect(kanteNode, allNodes)\n node[2] = min(node[2], kanteNode[2])\n elif kanteNode[3] == True:\n node[2] = min(node[2], kanteNode[1])\n if node[1] == node[2]:\n scc = []\n prevNode = None\n while prevNode != node:\n prevNode = stack.pop()\n prevNode[3] = False\n scc.append(prevNode)\n components.append(scc)\n\n\ntarjan()\n", "step-3": "<mask token>\nauthorStore = voldemort.StoreClient('authorStore', [{'0', 6666}])\nstack = []\ncomponents = []\nindex = 1\n\n\ndef tarjan():\n timer = time.time\n start = timer()\n voldemortResult = authorStore.get('_authors')\n allAuthors = voldemortResult[0][0]\n nodes = {}\n for author in allAuthors.get('content'):\n nodeKey = str(author)\n nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]\n nodes[nodeKey] = nodeValue\n for nodeKey in nodes:\n node = nodes.get(nodeKey)\n if node[1] == -1:\n strongconnect(node, nodes)\n end = timer()\n for scc in components:\n print('==> NEUE KOMPONENTE')\n for node in scc:\n print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) +\n ', Name: ' + node[0].get('name'))\n print('Insgesamt sind es ' + str(len(components)) + 
' Komponenten')\n print('Laufzeit: ' + str(end - start) + ' Sekunden')\n\n\ndef strongconnect(node, allNodes):\n global index\n node[1] = index\n node[2] = index\n index += 1\n stack.append(node)\n node[3] = True\n for kanteKey in node[0].get('friends'):\n kanteNode = allNodes.get(str(kanteKey))\n if kanteNode[1] == -1:\n strongconnect(kanteNode, allNodes)\n node[2] = min(node[2], kanteNode[2])\n elif kanteNode[3] == True:\n node[2] = min(node[2], kanteNode[1])\n if node[1] == node[2]:\n scc = []\n prevNode = None\n while prevNode != node:\n prevNode = stack.pop()\n prevNode[3] = False\n scc.append(prevNode)\n components.append(scc)\n\n\ntarjan()\n", "step-4": "import voldemort\nimport time\nauthorStore = voldemort.StoreClient('authorStore', [{'0', 6666}])\nstack = []\ncomponents = []\nindex = 1\n\n\ndef tarjan():\n timer = time.time\n start = timer()\n voldemortResult = authorStore.get('_authors')\n allAuthors = voldemortResult[0][0]\n nodes = {}\n for author in allAuthors.get('content'):\n nodeKey = str(author)\n nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]\n nodes[nodeKey] = nodeValue\n for nodeKey in nodes:\n node = nodes.get(nodeKey)\n if node[1] == -1:\n strongconnect(node, nodes)\n end = timer()\n for scc in components:\n print('==> NEUE KOMPONENTE')\n for node in scc:\n print('Index: ' + str(node[1]) + ', Lowlink: ' + str(node[2]) +\n ', Name: ' + node[0].get('name'))\n print('Insgesamt sind es ' + str(len(components)) + ' Komponenten')\n print('Laufzeit: ' + str(end - start) + ' Sekunden')\n\n\ndef strongconnect(node, allNodes):\n global index\n node[1] = index\n node[2] = index\n index += 1\n stack.append(node)\n node[3] = True\n for kanteKey in node[0].get('friends'):\n kanteNode = allNodes.get(str(kanteKey))\n if kanteNode[1] == -1:\n strongconnect(kanteNode, allNodes)\n node[2] = min(node[2], kanteNode[2])\n elif kanteNode[3] == True:\n node[2] = min(node[2], kanteNode[1])\n if node[1] == node[2]:\n scc = []\n prevNode = None\n while prevNode != node:\n prevNode = stack.pop()\n prevNode[3] = False\n scc.append(prevNode)\n components.append(scc)\n\n\ntarjan()\n", "step-5": "import voldemort\nimport time\n\nauthorStore = voldemort.StoreClient('authorStore', [{'0', 6666}])\nstack = []\ncomponents = []\nindex = 1\n\n# Implementation of the Tarjan algorithm for the detection of strongly connected components.\n# Function collects all authors in the database and outputs them as strongly connected components.\ndef tarjan():\n\ttimer = time.time\n\tstart = timer()\n\tvoldemortResult = authorStore.get(\"_authors\")\n\tallAuthors = voldemortResult[0][0]\n\tnodes = {}\n\tfor author in allAuthors.get(\"content\"):\n\t\tnodeKey = str(author)\n\t\tnodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]\n\t\t#node = Liste aus den Autorendaten, index(int), lowlink(int), onStack(boolean)\n\t\tnodes[nodeKey] = nodeValue\n\tfor nodeKey in nodes:\n\t\tnode = nodes.get(nodeKey)\n\t\tif node[1] == -1:\n\t\t\tstrongconnect(node, nodes)\n\tend = timer()\n\tfor scc in components:\n\t\tprint(\"==> NEUE KOMPONENTE\")\n\t\tfor node in scc:\n\t\t\tprint(\"Index: \" + str(node[1]) + \", Lowlink: \" + str(node[2]) + \", Name: \" + node[0].get('name'))\n\tprint(\"Insgesamt sind es \" + str(len(components)) + \" Komponenten\")\n\tprint(\"Laufzeit: \" + str(end - start) + \" Sekunden\")\n\n# This method connects every node in the graph and builds, if applicable, a strongly connected component out of them.\ndef strongconnect(node, allNodes):\n\tglobal index\n\tnode[1] = index\n\tnode[2] = 
index\n\tindex += 1\n\tstack.append(node)\n\tnode[3] = True\n\tfor kanteKey in node[0].get(\"friends\"):\n\t\tkanteNode = allNodes.get(str(kanteKey))\n\t\tif kanteNode[1] == -1:\n\t\t\tstrongconnect(kanteNode, allNodes)\n\t\t\tnode[2] = min(node[2], kanteNode[2])\n\t\telif kanteNode[3] == True:\n\t\t\tnode[2] = min(node[2], kanteNode[1])\n\tif node[1] == node[2]:\n\t\tscc = []\n\t\tprevNode = None\n\t\twhile prevNode != node:\n\t\t\tprevNode = stack.pop()\n\t\t\tprevNode[3] = False\n\t\t\tscc.append(prevNode)\n\t\tcomponents.append(scc)\n\ntarjan()", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
""" Common, pure functions used by the D-BAS. .. codeauthor:: Tobias Krauthoff <[email protected] """ import hashlib import locale import os import re import warnings from collections import defaultdict from datetime import datetime from enum import Enum, auto from html import escape, unescape from typing import List from urllib import parse from uuid import uuid4 from sqlalchemy import func from dbas.database import DBDiscussionSession from dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \ ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup from dbas.logger import logger from dbas.strings.keywords import Keywords as _ from dbas.strings.translator import Translator nick_of_anonymous_user = 'anonymous' fallback_lang = 'en' tag_type = 'span' start_attack = '<{} data-argumentation-type="attack">'.format(tag_type) start_argument = '<{} data-argumentation-type="argument">'.format(tag_type) start_position = '<{} data-argumentation-type="position">'.format(tag_type) start_content = '<{} class="triangle-content-text">'.format(tag_type) start_pro = '<{} data-attitude="pro">'.format(tag_type) start_con = '<{} data-attitude="con">'.format(tag_type) start_tag = '<{}>'.format(tag_type) end_tag = '</{}>'.format(tag_type) class BubbleTypes(Enum): USER = auto() SYSTEM = auto() STATUS = auto() INFO = auto() def __str__(self): return str(self.value) class Relations(Enum): UNDERMINE = 'undermine' UNDERCUT = 'undercut' REBUT = 'rebut' SUPPORT = 'support' def __str__(self): return str(self.value) class Attitudes(Enum): AGREE = 'agree' DISAGREE = 'disagree' DONT_KNOW = 'dontknow' def __str__(self): return str(self.value) relation_mapper = {relation.value: relation for relation in Relations} attitude_mapper = {attitude.value: attitude for attitude in Attitudes} def get_global_url(): """ Returns the global url of the project, based on the ENV :return: String """ return os.environ.get('URL', '') def get_changelog(no): """ Returns the 'no' last entries from the changelog :param no: int :return: list """ path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md')) lines = [line.rstrip('\n').strip() for line in open(path) if len(line.rstrip('\n').strip()) > 0] changelog = [] title = '' body = [] for l in lines: if l.startswith('#'): if len(title) > 0: changelog.append({'title': title, 'body': body}) body = [] title = l.replace('### ', '') else: body.append(l.replace('- ', '')) return changelog[0:no] def is_development_mode(registry): """ Returns true, if mode is set to development in current ini file. :param registry: request.registry :return: Boolean """ if 'mode' in registry.settings: return registry.settings['mode'].lower() == 'development' return False def usage_of_modern_bubbles(registry): """ Returns true, if modern bubbles are set in the current ini file. :param registry: request.registry :return: Boolean """ if 'modern_bubbles' in registry.settings: return registry.settings['modern_bubbles'].lower() == 'true' return False def usage_of_matomo(registry): """ Returns true, if matomo is set in the current ini file. :param registry: request.registry :return: Boolean """ if 'mode' in registry.settings: return registry.settings['usage_of_matomo'].lower() == 'true' return False def escape_string(text): """ Escapes all html special chars. 
:param text: string :return: html.escape(text) """ return escape(text) def get_discussion_language(matchdict, params, session, current_issue_uid=None): """ Returns Language.ui_locales CALL AFTER issue_handler.get_id_of_slug(..)! :param matchdict: matchdict of the current request :param params: params of the current request :param session: session of the current request :param current_issue_uid: uid :return: """ if not current_issue_uid: current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False, Issue.is_private == False).first() current_issue_uid = current_issue.uid if current_issue else None # first matchdict, then params, then session, afterwards fallback issue = matchdict['issue'] if 'issue' in matchdict \ else params['issue'] if 'issue' in params \ else session['issue'] if 'issue' in session \ else current_issue_uid db_issue = DBDiscussionSession.query(Issue).get(issue) return db_issue.lang if db_issue else 'en' def python_datetime_pretty_print(ts, lang): """ Pretty print of a locale :param ts: Timestamp :param lang: ui_locales :return: String """ formatter = '%b. %d.' if lang == 'de': try: locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8') formatter = '%d. %b.' except locale.Error: locale.setlocale(locale.LC_TIME, 'en_US.UTF8') return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter) def get_all_arguments_by_statement(statement_uid, include_disabled=False): """ Returns a list of all arguments where the statement is a conclusion or member of the premisegroup :param statement_uid: Statement.uid :param include_disabled: Boolean :return: [Arguments] """ logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled)) db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled) arg_array = [arg for arg in db_arguments] if db_arguments else [] premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid) if not include_disabled: premises = premises.filter_by(is_disabled=False) premises = premises.all() for premise in premises: arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled) db_undercuts = [] for arg in arg_array: db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled) db_undercutted_undercuts = [] for arg in db_undercuts: db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled) arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts)) logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array])) return arg_array if len(arg_array) > 0 else None def __get_argument_of_premisegroup(premisegroup_uid, include_disabled): """ Returns all arguments with the given premisegroup :param premisegroup_uid: PremisgGroup.uid :param include_disabled: Boolean :return: list of Arguments """ db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid) if not include_disabled: db_arguments = db_arguments.filter_by(is_disabled=False) return db_arguments.all() if db_arguments else [] def __get_undercuts_of_argument(argument_uid, include_disabled): """ Returns all undercuts fo the given argument :param argument_uid: Argument.uid :param include_disabled: boolean :return: list of Arguments """ db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid) if not include_disabled: db_undercuts = db_undercuts.filter_by(is_disabled=False) return db_undercuts.all() if db_undercuts else [] def __get_arguments_of_conclusion(statement_uid, 
include_disabled): """ Returns all arguments, where the statement is set as conclusion :param statement_uid: Statement.uid :param include_disabled: Boolean :return: list of arguments """ db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid) if not include_disabled: db_arguments = db_arguments.filter_by(is_disabled=False) return db_arguments.all() if db_arguments else [] def get_all_arguments_with_text_by_statement_id(statement_uid): """ Given a statement_uid, it returns all arguments, which use this statement and adds the corresponding text to it, which normally appears in the bubbles. The resulting text depends on the provided language. :param statement_uid: uid to a statement, which should be analyzed :return: list of dictionaries containing some properties of these arguments :rtype: list """ logger('DBAS.LIB', 'main ' + str(statement_uid)) arguments = get_all_arguments_by_statement(statement_uid) results = [] if arguments: results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments] return results def get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False, is_jump=False): """ Given a statement_uid, it returns all arguments, which use this statement and adds the corresponding text to it, which normally appears in the bubbles. The resulting text depends on the provided language. :param db_statement: Statement :param urlmanager: :param color_statement: True, if the statement (specified by the ID) should be colored :return: list of dictionaries containing some properties of these arguments :rtype: list """ logger('DBAS.LIB', 'main ' + str(db_statement.uid)) arguments = get_all_arguments_by_statement(db_statement.uid) uids = [arg.uid for arg in arguments] if arguments else None results = list() sb = '<{} data-argumentation-type="position">'.format(tag_type) if color_statement else '' se = '</{}>'.format(tag_type) if color_statement else '' if not uids: return [] uids.sort() for uid in uids: statement_text = db_statement.get_text() attack_type = 'jump' if is_jump else '' argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type) pos = argument_text.lower().find(statement_text.lower()) argument_text = argument_text[:pos] + sb + argument_text[pos:] pos += len(statement_text) + len(sb) argument_text = argument_text[:pos] + se + argument_text[pos:] results.append({ 'uid': uid, 'text': argument_text, 'url': urlmanager.get_url_for_jump(uid) }) return results def get_slug_by_statement_uid(uid): """ Returns slug for the given Issue.uid :param uid: Issue.uid :return: String """ db_statement = DBDiscussionSession.query(Statement).get(uid) return resolve_issue_uid_to_slug(db_statement.issue_uid) def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False, user_changed_opinion=False, rearrange_intro=False, colored_position=False, attack_type=None, minimize_on_undercut=False, is_users_opinion=True, anonymous_style=False, support_counter_argument=False): """ Returns current argument as string like "conclusion, because premise1 and premise2" :param uid: Integer :param with_html_tag: Boolean :param start_with_intro: Boolean :param first_arg_by_user: Boolean :param user_changed_opinion: Boolean :param rearrange_intro: Boolean :param colored_position: Boolean :param attack_type: String :param minimize_on_undercut: Boolean :param anonymous_style: Boolean :param support_counter_argument: Boolean :return: 
    String
    """
    logger('DBAS.LIB', 'main {}'.format(uid))
    db_argument = DBDiscussionSession.query(Argument).get(uid)
    if not db_argument:
        return None

    lang = db_argument.lang
    _t = Translator(lang)

    premisegroup_by_user = False
    author_uid = None
    db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()

    if db_user:
        author_uid = db_user.uid
        pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)
        marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(argument_uid=uid,
                                                                              author_uid=db_user.uid).first()
        premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None

    # getting all argument id
    arg_array = [db_argument]
    while db_argument.argument_uid:
        db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)
        arg_array.append(db_argument)

    if attack_type == 'jump':
        return __build_argument_for_jump(arg_array, with_html_tag)

    if len(arg_array) == 1:
        # build one argument only
        return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type,
                                       _t, start_with_intro, is_users_opinion, anonymous_style,
                                       support_counter_argument, author_uid)
    else:
        # get all pgroups and at last, the conclusion
        return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,
                                       start_with_intro, minimize_on_undercut, anonymous_style,
                                       premisegroup_by_user, _t)


def __build_argument_for_jump(arg_array: List[Argument], with_html_tag):
    """
    Build text for an argument, if we jump to this argument

    :param arg_array: [Argument]
    :param with_html_tag: Boolean
    :return: String
    """
    tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">') if with_html_tag else ''
    tag_conclusion = ('<' + tag_type + ' data-argumentation-type="argument">') if with_html_tag else ''
    tag_end = ('</' + tag_type + '>') if with_html_tag else ''
    lang = arg_array[0].lang
    _t = Translator(lang)

    if len(arg_array) == 1:
        ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)
    elif len(arg_array) == 2:
        ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)
    else:
        ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)

    return ret_value.replace('  ', ' ')


def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):
    premises = db_argument.get_premisegroup_text()
    if premises[-1] != '.':
        premises += '.'
    conclusion = db_argument.get_conclusion_text()
    because = _t.get(_.because).lower()

    conclusion = tag_conclusion + conclusion + tag_end
    premises = tag_premise + premises + tag_end

    intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''
    ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)

    if _t.get_lang() == 'de':
        intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)
        intro = intro[0:1].upper() + intro[1:]
        intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag
        ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)

    return ret_value


def __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
    db_undercut = arg_array[0]
    db_conclusion_argument = arg_array[1]
    premise = db_undercut.get_premisegroup_text()
    conclusion_premise = db_conclusion_argument.get_premisegroup_text()
    conclusion_conclusion = db_conclusion_argument.get_conclusion_text()

    premise = tag_premise + premise + tag_end
    conclusion_premise = tag_conclusion + conclusion_premise + tag_end
    conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end

    intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''
    bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag
    because = _t.get(_.because)
    ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)

    return ret_value


def __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
    premise1 = arg_array[0].get_premisegroup_text()
    premise2 = arg_array[1].get_premisegroup_text()
    premise3 = arg_array[2].get_premisegroup_text()
    conclusion = arg_array[2].get_conclusion_text()

    bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
    because = _t.get(_.because)
    separator = ',' if _t.get_lang() == 'de' else ''

    premise1 = tag_premise + premise1 + tag_end
    premise2 = tag_conclusion + premise2 + tag_end
    argument = '{}{} {} {}'.format(conclusion, separator, because.lower(), premise3)
    argument = tag_conclusion + argument + tag_end

    # P2 is not a good reason against the argument that C holds because of P3. Because of P1.
    ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because, premise1)
    return ret_value


def __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,
                            attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,
                            anonymous_style: bool, support_counter_argument: bool = False, author_uid=None):
    """
    Build up argument text for a single argument

    Please, do not touch this!
:param uid: Argument.uid :param rearrange_intro: Boolean :param with_html_tag: Boolean :param colored_position: Boolean :param attack_type: String :param _t: Translator :param start_with_intro: Boolean :param is_users_opinion: Boolean :param anonymous_style: Boolean :param support_counter_argument: Boolean :param author_uid: User.uid :return: String """ premises_text = db_argument.get_premisegroup_text() conclusion_text = db_argument.get_conclusion_text() lang = db_argument.lang if lang != 'de': premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises_text, conclusion_text) marked_element = False if author_uid: db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.author_uid == author_uid).first() marked_element = db_marked is not None you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip() if lang == 'de': ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style, rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang, premises_text, conclusion_text, is_users_opinion, support_counter_argument) else: ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion_text, premises_text, db_argument) return ret_value.replace(' ', ' ') def __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion): sb_none = start_tag if with_html_tag else '' se = end_tag if with_html_tag else '' if attack_type not in ['dont_know', 'jump']: sb = start_tag if with_html_tag else '' if colored_position: sb = start_position if with_html_tag else '' if attack_type == Relations.UNDERMINE: premises = sb + premises + se else: conclusion = sb + conclusion + se else: sb = start_argument if with_html_tag else '' sb_tmp = start_attack if with_html_tag else '' premises = sb + premises + se conclusion = sb_tmp + conclusion + se return premises, conclusion, sb, sb_none, se def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style, rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang, premises, conclusion, is_users_opinion, support_counter_argument): if start_with_intro and not anonymous_style: intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat) if rearrange_intro: intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat) ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' ' elif is_users_opinion and not anonymous_style: ret_value = sb_none if support_counter_argument: ret_value += _t.get(_.youAgreeWithThecounterargument) elif marked_element: ret_value += you_have_the_opinion_that else: ret_value += _t.get(_.youArgue) ret_value += se + ' ' else: tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous) ret_value = sb_none + sb + tmp + se + ' ' ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else '' ret_value += conclusion ret_value += ', ' if lang == 'de' else ' ' ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises return ret_value def __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, 
                                    premises, db_arg):
    tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '
    ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '
    ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp
    ret_value += ' ' + premises
    return ret_value


def __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,
                            start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
    """

    :param arg_array:
    :param first_arg_by_user:
    :param user_changed_opinion:
    :param with_html_tag:
    :param start_with_intro:
    :param minimize_on_undercut:
    :param anonymous_style:
    :param premisegroup_by_user:
    :param _t:
    :return:
    """
    # get all pgroups and at last, the conclusion
    pgroups = []
    supportive = []
    arg_array = arg_array[::-1]
    local_lang = arg_array[0].lang

    # grepping all arguments in the chain
    for db_argument in arg_array:
        text = db_argument.get_premisegroup_text()
        pgroups.append(text)
        supportive.append(db_argument.is_supportive)

    conclusion = arg_array[0].get_conclusion_text()

    # html tags for framing
    sb = start_position if with_html_tag else ''
    se = end_tag if with_html_tag else ''

    because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '

    if len(arg_array) % 2 == 0 and not first_arg_by_user and not anonymous_style:  # system starts
        ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '
        tmp_users_opinion = True  # user after system
    elif not anonymous_style:  # user starts
        ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''
        tmp_users_opinion = False  # system after user
        conclusion = se + conclusion[0:1].upper() + conclusion[1:]  # pretty print
    else:
        ret_value = _t.get(_.someoneArgued) + ' '
        tmp_users_opinion = False

    tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
    ret_value += tmp + conclusion + because + pgroups[0] + '.'
    del pgroups[0]

    # just display the last premise group on undercuts, because the story is always saved in all bubbles
    if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
        return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'

    for i, pgroup in enumerate(pgroups):
        ret_value += ' '
        if tmp_users_opinion and not anonymous_style:
            tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest
            ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)
        elif not anonymous_style:
            ret_value += _t.get(_.youAgreeWithThatNow)
        else:
            ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)

        ret_value += sb + ' ' + pgroups[i] + '.'
        tmp_users_opinion = not tmp_users_opinion

    return ret_value.replace('  ', ' ')


def get_text_for_premisegroup_uid(uid):
    """
    Returns the joined text of the premise group

    :param uid: premisegroup_uid
    :return: text
    """
    warnings.warn("Use PremiseGroup.get_text() instead.", DeprecationWarning)
    db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()
    if len(db_premises) == 0:
        return ''

    texts = [premise.get_text() for premise in db_premises]
    lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang
    _t = Translator(lang)

    return ' {} '.format(_t.get(_.aand)).join(texts)


def get_text_for_statement_uid(uid: int, colored_position=False):
    """
    Returns text of statement with given uid

    :param uid: Statement.uid
    :param colored_position: Boolean
    :return: String
    """
    warnings.warn("Use Statement.get_text() or Statement.get_html() instead.", DeprecationWarning)
    if not isinstance(uid, int):
        return None
    db_statement = DBDiscussionSession.query(Statement).get(uid)
    if not db_statement:
        return None

    db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(
        db_statement.textversion_uid)
    content = db_textversion.content

    while content.endswith(('.', '?', '!')):
        content = content[:-1]

    sb, se = '', ''
    if colored_position:
        sb = '<{} data-argumentation-type="position">'.format(tag_type)
        se = '</{}>'.format(tag_type)

    return sb + content + se


def get_text_for_premise(uid: int, colored_position: bool = False):
    """
    Returns text of premise with given uid

    :param uid: Statement.uid
    :param colored_position: Boolean
    :return: String
    """
    db_premise = DBDiscussionSession.query(Premise).get(uid)
    if db_premise:
        return db_premise.get_text(html=colored_position)
    else:
        return None


def get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):
    """
    Checks whether the arguments conclusion is a statement or an argument and returns its text

    :param argument: Argument
    :param start_with_intro: Boolean
    :param rearrange_intro: Boolean
    :param is_users_opinion: Boolean
    :return: String
    """
    if argument.argument_uid:
        return get_text_for_argument_uid(argument.argument_uid, start_with_intro,
                                         rearrange_intro=rearrange_intro, is_users_opinion=is_users_opinion)
    else:
        return argument.get_conclusion_text()


def resolve_issue_uid_to_slug(uid):
    """
    Given the issue uid query database and return the correct slug of the issue.

    :param uid: issue_uid
    :type uid: int
    :return: Slug of issue
    :rtype: str
    """
    issue = DBDiscussionSession.query(Issue).get(uid)
    return issue.slug if issue else None


def get_all_attacking_arg_uids_from_history(history):
    """
    Returns the uids of all arguments in the history which attacked the user

    :param history: String
    :return: [Arguments.uid]
    :rtype: list
    """
    try:
        splitted_history = history.split('-')
        uids = []
        for part in splitted_history:
            if 'reaction' in part:
                parts = part.split('/')
                pos = parts.index('reaction')
                uids.append(part.split('/')[pos + 3])
        return uids
    except AttributeError:
        return []


def get_user_by_private_or_public_nickname(nickname):
    """
    Gets the user by his (public) nickname, based on whether his nickname is public or not

    :param nickname: Nickname of the user
    :return: Current user or None
    """
    db_user = get_user_by_case_insensitive_nickname(nickname)
    db_public_user = get_user_by_case_insensitive_public_nickname(nickname)

    uid = 0
    if db_user:
        uid = db_user.uid
    elif db_public_user:
        uid = db_public_user.uid

    db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()

    if not db_settings:
        return None

    if db_settings.should_show_public_nickname and db_user:
        return db_user
    elif not db_settings.should_show_public_nickname and db_public_user:
        return db_public_user

    return None


def get_user_by_case_insensitive_nickname(nickname):
    """
    Returns user with given nickname

    :param nickname: String
    :return: User or None
    """
    return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()


def get_user_by_case_insensitive_public_nickname(public_nickname):
    """
    Returns user with given public nickname

    :param public_nickname: String
    :return: User or None
    """
    return DBDiscussionSession.query(User).filter(
        func.lower(User.public_nickname) == func.lower(public_nickname)).first()


def pretty_print_options(message):
    """
    Some modifications for pretty printing. Use uppercase for the first letter in the text and a single dot
    for the end, if there isn't one already.

    :param message: String
    :return: String
    """
    # check for html
    if message[0:1] == '<':
        pos = message.index('>')
        message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]
    else:
        message = message[0:1].upper() + message[1:]

    # check for html
    if message[-1] == '>':
        pos = message.rfind('<')
        if message[pos - 1:pos] not in ['.', '?', '!']:
            message = message[0:pos] + '.' + message[pos:]
    elif not message.endswith(('.', '?', '!')):
        message += '.'

    return message


def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool = False, is_author: bool = False,
                             uid: str = '', bubble_url: str = '', content: str = '', omit_bubble_url: bool = False,
                             omit_vote_info: bool = False, argument_uid: int = None, statement_uid: int = None,
                             is_supportive: bool = False, nickname: str = 'anonymous', lang: str = 'en',
                             is_users_opinion: bool = False, other_author: User = None):
    """
    Creates a dictionary which includes every information needed for a bubble.

    :param bubble_type: BubbleTypes
    :param is_markable: True, if the content itself could be flagged
    :param is_author: True, if the current user is the author of the content
    :param uid: Identifier for the bubble
    :param bubble_url: URL for the click event of the bubble
    :param content: Text of the bubble
    :param omit_bubble_url: True, if the bubble should not have a link
    :param omit_vote_info: True, if the bubble should not have the little, grey information text
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param is_supportive: Boolean
    :param nickname: String
    :param lang: ui_locales
    :param is_users_opinion: Boolean
    :param other_author: User, if the bubble was authored by another user
    :return: dict()
    """
    gravatar_link = get_global_url() + '/static/images/icon.png'
    profile = None

    if uid != 'now':
        content = pretty_print_options(content)

    if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
        gravatar_link = get_profile_picture(other_author, 25)
        profile = '/user/{}'.format(other_author.uid)

    # check for users opinion
    if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
        db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
        db_marked = None
        gravatar_link = get_profile_picture(db_user, 25)
        if argument_uid is not None and db_user is not None:
            db_marked = DBDiscussionSession.query(MarkedArgument).filter(
                MarkedArgument.argument_uid == argument_uid,
                MarkedArgument.author_uid == db_user.uid).first()

        if statement_uid is not None and db_user is not None:
            db_marked = DBDiscussionSession.query(MarkedStatement).filter(
                MarkedStatement.statement_uid == statement_uid,
                MarkedStatement.author_uid == db_user.uid).first()

        is_users_opinion = db_marked is not None

    speech = {
        'is_user': bubble_type is BubbleTypes.USER,
        'is_system': bubble_type is BubbleTypes.SYSTEM,
        'is_status': bubble_type is BubbleTypes.STATUS,
        'is_info': bubble_type is BubbleTypes.INFO,
        'is_markable': is_markable,
        'is_author': is_author,
        'id': uid if len(str(uid)) > 0 else uuid4().hex,
        'bubble_url': bubble_url,
        'message': content,
        'omit_bubble_url': omit_bubble_url,
        'omit_vote_info': omit_vote_info,
        'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',
        'data_argument_uid': argument_uid,
        'data_statement_uid': statement_uid,
        'data_is_supportive': is_supportive,
        'is_users_opinion': is_users_opinion,
        'enemy': {
            'avatar': gravatar_link,
            'profile': profile,
            'available': profile is not None
        }
    }

    votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,
                                                         statement_uid, speech, lang)

    speech['votecounts_message'] = votecount_keys[speech['votecounts']]

    return speech


def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):
    """
    Build text for a bubble: how many other participants have the same interest?

    :param nickname: User.nickname
    :param is_user: Boolean
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param speech: dict()
    :param lang: ui_locales
    :return: [String]
    """
    if not nickname:
        nickname = 'anonymous'

    db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
    if not db_user:
        db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()

    db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)

    _t = Translator(lang)
    speech['votecounts'] = len(db_clicks) if db_clicks else 0
    if db_marks:
        speech['votecounts'] += len(db_marks)

    votecount_keys = defaultdict(lambda: "{} {}.".format(speech['votecounts'], _t.get(_.voteCountTextMore)))

    if is_user and db_user.gender == 'm':
        gender_key = _.voteCountTextFirstM
    elif is_user and db_user.gender == 'f':
        gender_key = _.voteCountTextFirstF
    else:
        gender_key = _.voteCountTextFirst

    votecount_keys[0] = '{}.'.format(_t.get(gender_key))
    votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'

    return votecount_keys


def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
    db_clicks = None
    db_marks = None
    if argument_uid:
        db_clicks = DBDiscussionSession.query(ClickedArgument). \
            filter(ClickedArgument.argument_uid == argument_uid,
                   ClickedArgument.is_up_vote == True,
                   ClickedArgument.is_valid,
                   ClickedArgument.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedArgument). \
            filter(MarkedArgument.argument_uid == argument_uid,
                   MarkedArgument.author_uid != db_user.uid).all()

    elif statement_uid:
        db_clicks = DBDiscussionSession.query(ClickedStatement). \
            filter(ClickedStatement.statement_uid == statement_uid,
                   ClickedStatement.is_up_vote == True,
                   ClickedStatement.is_valid,
                   ClickedStatement.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedStatement). \
            filter(MarkedStatement.statement_uid == statement_uid,
                   MarkedStatement.author_uid != db_user.uid).all()

    return db_clicks, db_marks


def is_argument_disabled_due_to_disabled_statements(argument):
    """
    Returns true if any involved statement is disabled.

    :param argument: Argument
    :return: Boolean
    """
    if argument.conclusion_uid is None:
        # check conclusion of given arguments conclusion
        db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)
        conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
        if conclusion.is_disabled:
            return True

        # check premisegroup of given arguments conclusion
        premises = __get_all_premises_of_argument(db_argument)
        for premise in premises:
            if premise.statements.is_disabled:
                return True
    else:
        # check conclusion of given argument
        conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)
        if conclusion.is_disabled:
            return True

        # check premisegroup of given argument
        premises = __get_all_premises_of_argument(argument)
        for premise in premises:
            if premise.statements.is_disabled:
                return True

    return False


def is_author_of_statement(db_user: User, statement_uid: int) -> bool:
    """
    Is the user with given nickname author of the statement?
:param db_user: User :param statement_uid: Statement.uid :return: Boolean """ db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None if not db_user: return False db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by( TextVersion.uid.asc()).first() # TODO #432 if not db_textversion: return False return db_textversion.author_uid == db_user.uid def is_author_of_argument(db_user: User, argument_uid: int) -> bool: """ Is the user with given nickname author of the argument? :param db_user: User :param argument_uid: Argument.uid :return: Boolean """ db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None if not db_user: return False db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid, Argument.author_uid == db_user.uid).first() return True if db_argument else False def __get_all_premises_of_argument(argument): """ Returns list with all premises of the argument. :param argument: Argument :return: list() """ ret_list = [] db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join( Statement).all() for premise in db_premises: ret_list.append(premise) return ret_list def get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False): """ Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px :param user: User :param size: Integer, default 80 :param ignore_privacy_settings: :return: String """ additional_id = '' if user and isinstance(user, User): additional_id = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x' return __get_gravatar(user, additional_id, size) def get_public_profile_picture(user: User, size: int = 80): """ Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px If the user doesn't want an public profile, an anonymous image will be returned :param user: User :param size: Integer, default 80 :return: String """ additional_id = '' if user.settings.should_show_public_nickname: additional_id = 'x' if len(str(user.oauth_provider)) > 0: additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id) return __get_gravatar(user, additional_id, size) def __get_gravatar(user, additional_id, size): if user: if str(user.email) == 'None': email = (user.nickname + additional_id).encode('utf-8') else: email = (user.email + additional_id).encode('utf-8') else: email = 'unknown'.encode('utf-8') gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest()) gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)}) return gravatar_url def get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20): """ Returns a-tag with gravatar of current author and users page as href :param uid: Uid of the author :param gravatar_on_right_side: True, if the gravatar is on the right of authors name :param linked_with_users_page: True, if the text is a link to the authors site :param profile_picture_size: Integer :return: HTML-String """ db_user = DBDiscussionSession.query(User).get(int(uid)) if not db_user: return None, 'Missing author with uid ' + str(uid), False nick = db_user.global_nickname img_src = get_profile_picture(db_user, profile_picture_size) link_begin = '' link_end = '' if linked_with_users_page: link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick) 
link_end = '</a>' side = 'left' if gravatar_on_right_side else 'right' img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(img_src, side) if gravatar_on_right_side: return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True else: return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True def bubbles_already_last_in_list(bubble_list, bubbles): """ Are the given bubbles already at the end of the bubble list :param bubble_list: list of Bubbles :param bubbles: list of bubbles :return: Boolean """ if isinstance(bubbles, list): length = len(bubbles) else: length = 1 bubbles = [bubbles] if len(bubble_list) < length: return False for bubble in bubbles: if 'message' not in bubble: return False start_index = - length is_already_in = False for bubble in bubbles: last = bubble_list[start_index] if 'message' not in last or 'message' not in bubble: return False text1 = unhtmlify(last['message'].lower()).strip() text2 = unhtmlify(bubble['message'].lower()).strip() is_already_in = is_already_in or (text1 == text2) start_index += 1 return is_already_in def unhtmlify(html): """ Remove html-tags and unescape encoded html-entities. :param html: Evil-string containing html :return: """ return unescape(re.sub(r'<.*?>', '', html))
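
# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): the block
# below illustrates how the helpers above are typically combined to render one
# argument as a speech bubble. It is a hedged example only. It assumes that
# DBDiscussionSession is already bound to a populated D-BAS database, that an
# argument with uid 1 exists, and that the nickname 'some_user' belongs to a
# registered user; none of these values come from this module itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_argument_uid = 1          # hypothetical, must exist in the database
    example_nickname = 'some_user'    # hypothetical registered user

    # 1. Build the natural-language text for the argument ("C, because P1 and P2").
    argument_text = get_text_for_argument_uid(example_argument_uid,
                                              nickname=example_nickname,
                                              with_html_tag=True,
                                              start_with_intro=True)

    # 2. Wrap that text into the dictionary the frontend consumes as a bubble,
    #    including gravatar link, mark state and the "n other participants" message.
    bubble = create_speechbubble_dict(BubbleTypes.USER,
                                      is_markable=True,
                                      uid='arg_{}'.format(example_argument_uid),
                                      content=argument_text,
                                      argument_uid=example_argument_uid,
                                      nickname=example_nickname,
                                      lang='en')

    # 3. Author info for some user (uid 1 is again a placeholder), e.g. for an attribution line.
    db_author, author_html, success = get_author_data(1, profile_picture_size=20)

    print(bubble['message'])
    print(bubble['votecounts_message'])
    if success:
        print(author_html)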
normal
{ "blob_id": "10a9437453371bd7472e93af1026c778b7983cf8", "index": 1137, "step-1": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if 
_t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: 
Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = 
DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side 
else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if 
_t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the 
premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n 
if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession.query(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email +
additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\n<mask token>\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n 
:return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = 
DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n 
:return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. 
{} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif 
marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask 
token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' 
+ message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param 
nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession.query(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid ==
db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return 
str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. 
%b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = 
DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">' if\n with_html_tag else '')\n tag_conclusion = ('<' + tag_type +\n ' data-argumentation-type=\"argument\">' if with_html_tag else '')\n tag_end = '</' + tag_type + '>' if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise,\n tag_conclusion, tag_end, _t)\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise,\n tag_conclusion, tag_end, _t)\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array,\n tag_premise, tag_conclusion, tag_end, _t)\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise,\n tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n intro = _t.get(_.statementAbout) + ' ' if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = 
_t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind,\n conclusion_conclusion, because, premise)\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + 
premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += 
_t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: 
User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type 
is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = 
__get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =argument.premisegroup_uid).join(Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef 
get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n if len(bubble_list) < length:\n return False\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n start_index = -length\n is_already_in = False\n for bubble in bubbles:\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or text1 == text2\n start_index += 1\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub('<.*?>', '', html))\n", "step-5": "\"\"\"\nCommon, pure functions used by the D-BAS.\n\n\n.. 
codeauthor:: Tobias Krauthoff <[email protected]\n\"\"\"\nimport hashlib\nimport locale\nimport os\nimport re\nimport warnings\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom enum import Enum, auto\nfrom html import escape, unescape\nfrom typing import List\nfrom urllib import parse\nfrom uuid import uuid4\n\nfrom sqlalchemy import func\n\nfrom dbas.database import DBDiscussionSession\nfrom dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \\\n ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup\nfrom dbas.logger import logger\nfrom dbas.strings.keywords import Keywords as _\nfrom dbas.strings.translator import Translator\n\nnick_of_anonymous_user = 'anonymous'\n\nfallback_lang = 'en'\ntag_type = 'span'\nstart_attack = '<{} data-argumentation-type=\"attack\">'.format(tag_type)\nstart_argument = '<{} data-argumentation-type=\"argument\">'.format(tag_type)\nstart_position = '<{} data-argumentation-type=\"position\">'.format(tag_type)\nstart_content = '<{} class=\"triangle-content-text\">'.format(tag_type)\nstart_pro = '<{} data-attitude=\"pro\">'.format(tag_type)\nstart_con = '<{} data-attitude=\"con\">'.format(tag_type)\nstart_tag = '<{}>'.format(tag_type)\nend_tag = '</{}>'.format(tag_type)\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\nrelation_mapper = {relation.value: relation for relation in Relations}\nattitude_mapper = {attitude.value: attitude for attitude in Attitudes}\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef 
escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,\n Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n\n # first matchdict, then params, then session, afterwards fallback\n issue = matchdict['issue'] if 'issue' in matchdict \\\n else params['issue'] if 'issue' in params \\\n else session['issue'] if 'issue' in session \\\n else current_issue_uid\n\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled)\n\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n 
:return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False,\n is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n\n if not uids:\n return []\n\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n\n results.append({\n 'uid': uid,\n 'text': argument_text,\n 'url': urlmanager.get_url_for_jump(uid)\n })\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False,\n user_changed_opinion=False, rearrange_intro=False, colored_position=False,\n attack_type=None, minimize_on_undercut=False, 
is_users_opinion=True,\n anonymous_style=False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()\n\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid,\n author_uid=db_user.uid).first()\n premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None\n\n # getting all argument id\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)\n arg_array.append(db_argument)\n\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n\n if len(arg_array) == 1:\n # build one argument only\n return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style, support_counter_argument,\n author_uid)\n\n else:\n # get all pgroups and at last, the conclusion\n return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user,\n _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">') if with_html_tag else ''\n tag_conclusion = ('<' + tag_type + ' data-argumentation-type=\"argument\">') if with_html_tag else ''\n tag_end = ('</' + tag_type + '>') if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)\n\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n\n intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, 
premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n\n intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = _t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)\n\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3)\n argument = tag_conclusion + argument + tag_end\n\n # P2 ist kein guter Grund gegen das Argument, dass C weil P3. Weil P1\n ret_value = '{} {} {}. 
{} {}'.format(premise2, bind, argument, because, premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,\n attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid=None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print\n\n premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag,\n attack_type,\n colored_position,\n premises_text,\n conclusion_text)\n\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid,\n MarkedArgument.author_uid == author_uid).first()\n marked_element = db_marked is not None\n\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip()\n\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro,\n anonymous_style, rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion,\n support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element,\n conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang,\n premises, conclusion, is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n\n ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' '\n\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif 
marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\ndef __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, premises, db_arg):\n tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '\n ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '\n ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp\n ret_value += ' ' + premises\n return ret_value\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n # get all pgroups and at last, the conclusion\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n\n # grepping all arguments in the chain\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n\n conclusion = arg_array[0].get_conclusion_text()\n\n # html tags for framing\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '\n\n if len(arg_array) % 2 is 0 and not first_arg_by_user and not anonymous_style: # system starts\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True # user after system\n\n elif not anonymous_style: # user starts\n ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''\n tmp_users_opinion = False # system after user\n conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print\n\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n\n # just display the last premise group on undercuts, because the story is always saved in all bubbles\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'\n\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)\n\n ret_value += sb + ' ' + pgroups[i] + '.'\n 
tmp_users_opinion = not tmp_users_opinion\n\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn(\"Use PremiseGroup.get_text() instead.\", DeprecationWarning)\n\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang\n _t = Translator(lang)\n\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\ndef get_text_for_statement_uid(uid: int, colored_position=False):\n \"\"\"\n Returns text of statement with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n warnings.warn(\"Use Statement.get_text() or Statement.get_html() instead.\", DeprecationWarning)\n\n if not isinstance(uid, int):\n return None\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n if not db_statement:\n return None\n\n db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(\n db_statement.textversion_uid)\n content = db_textversion.content\n\n while content.endswith(('.', '?', '!')):\n content = content[:-1]\n\n sb, se = '', ''\n if colored_position:\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type)\n se = '</{}>'.format(tag_type)\n\n return sb + content + se\n\n\ndef get_text_for_premise(uid: int, colored_position: bool = False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n 
db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()\n\n if not db_settings:\n return None\n\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(\n func.lower(User.public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n\n # check for html\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n\n # check for html\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=False, is_author: bool=False, uid: str='',\n bubble_url: str= '', content: str= '', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=False,\n nickname: str='anonymous', lang: str='en', is_users_opinion: bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n\n if uid is not 'now':\n content = pretty_print_options(content)\n\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n\n # check for users opinion\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = 
DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid == db_user.uid).first()\n\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid == db_user.uid).first()\n\n is_users_opinion = db_marked is not None\n\n speech = {\n 'is_user': bubble_type is BubbleTypes.USER,\n 'is_system': bubble_type is BubbleTypes.SYSTEM,\n 'is_status': bubble_type is BubbleTypes.STATUS,\n 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable,\n 'is_author': is_author,\n 'id': uid if len(str(uid)) > 0 else uuid4().hex,\n 'bubble_url': bubble_url,\n 'message': content,\n 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info,\n 'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid,\n 'data_statement_uid': statement_uid,\n 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion,\n 'enemy': {\n 'avatar': gravatar_link,\n 'profile': profile,\n 'available': profile is not None\n }\n }\n\n votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,\n statement_uid, speech, lang)\n\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n\n if not nickname:\n nickname = 'anonymous'\n\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()\n\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)\n\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n\n votecount_keys = defaultdict(lambda: \"{} {}.\".format(speech['votecounts'], _t.get(_.voteCountTextMore)))\n\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument). \\\n filter(ClickedArgument.argument_uid == argument_uid,\n ClickedArgument.is_up_vote == True,\n ClickedArgument.is_valid,\n ClickedArgument.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument). 
\\\n filter(MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid != db_user.uid).all()\n\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement). \\\n filter(ClickedStatement.statement_uid == statement_uid,\n ClickedStatement.is_up_vote == True,\n ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement). \\\n filter(MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid != db_user.uid).all()\n\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n # check conclusion of given arguments conclusion\n db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n # check premisegroup of given arguments conclusion\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n # check conclusion of given argument\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n\n # check premisegroup of given argument\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by(\n TextVersion.uid.asc()).first() # TODO #432\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid,\n Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join(\n Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = '' if 
user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int = 80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id)\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(img_src, side)\n\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n\n if len(bubble_list) < length:\n return False\n\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n\n start_index = - length\n is_already_in = False\n for bubble in bubbles:\n\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or (text1 == text2)\n start_index += 1\n\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub(r'<.*?>', '', html))\n", "step-ids": [ 29, 31, 47, 55, 60 ] }
[ 29, 31, 47, 55, 60 ]
<|reserved_special_token_0|> class IsingModel: def __init__(self, image, J, rate, sigma): self.width = image.shape[0] self.height = image.shape[1] self._J = J self._rate = rate self._sigma = sigma self.image, self.logodds = self.presenting_image(image) <|reserved_special_token_0|> <|reserved_special_token_0|> def interaction_potentials(self, x, y): nbrs = self.neighbors(x, y) return sum(nbrs) def variational_inference(self, x, y): E = self._J * self.interaction_potentials(x, y) self.image[x, y] = (1 - self._rate) * self.image[x, y ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y]) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class IsingModel: def __init__(self, image, J, rate, sigma): self.width = image.shape[0] self.height = image.shape[1] self._J = J self._rate = rate self._sigma = sigma self.image, self.logodds = self.presenting_image(image) <|reserved_special_token_0|> def neighbors(self, x, y): nbrs = [] if x == 0: nbrs.append(self.image[self.width - 1, y]) else: nbrs.append(self.image[x - 1, y]) if x == self.width - 1: nbrs.append(self.image[0, y]) else: nbrs.append(self.image[x + 1, y]) if y == 0: nbrs.append(self.image[x, self.height - 1]) else: nbrs.append(self.image[x, y - 1]) if y == self.height - 1: nbrs.append(self.image[x, 0]) else: nbrs.append(self.image[x, y + 1]) return nbrs def interaction_potentials(self, x, y): nbrs = self.neighbors(x, y) return sum(nbrs) def variational_inference(self, x, y): E = self._J * self.interaction_potentials(x, y) self.image[x, y] = (1 - self._rate) * self.image[x, y ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y]) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class IsingModel: def __init__(self, image, J, rate, sigma): self.width = image.shape[0] self.height = image.shape[1] self._J = J self._rate = rate self._sigma = sigma self.image, self.logodds = self.presenting_image(image) def presenting_image(self, image): logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov= self._sigma ** 2) - multivariate_normal.logpdf(image.flatten(), mean=-1, cov=self._sigma ** 2) logodds = np.reshape(logodds, image.shape) pr_plus1 = 1 / (1 + np.exp(-1 * logodds)) return 2 * pr_plus1 - 1, logodds def neighbors(self, x, y): nbrs = [] if x == 0: nbrs.append(self.image[self.width - 1, y]) else: nbrs.append(self.image[x - 1, y]) if x == self.width - 1: nbrs.append(self.image[0, y]) else: nbrs.append(self.image[x + 1, y]) if y == 0: nbrs.append(self.image[x, self.height - 1]) else: nbrs.append(self.image[x, y - 1]) if y == self.height - 1: nbrs.append(self.image[x, 0]) else: nbrs.append(self.image[x, y + 1]) return nbrs def interaction_potentials(self, x, y): nbrs = self.neighbors(x, y) return sum(nbrs) def variational_inference(self, x, y): E = self._J * self.interaction_potentials(x, y) self.image[x, y] = (1 - self._rate) * self.image[x, y ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y]) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class IsingModel: def __init__(self, image, J, rate, sigma): self.width = image.shape[0] self.height = image.shape[1] self._J = J self._rate = rate self._sigma = sigma self.image, self.logodds = self.presenting_image(image) def presenting_image(self, image): logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov= self._sigma ** 2) - multivariate_normal.logpdf(image.flatten(), mean=-1, cov=self._sigma ** 2) logodds = np.reshape(logodds, image.shape) pr_plus1 = 1 / (1 + 
np.exp(-1 * logodds)) return 2 * pr_plus1 - 1, logodds def neighbors(self, x, y): nbrs = [] if x == 0: nbrs.append(self.image[self.width - 1, y]) else: nbrs.append(self.image[x - 1, y]) if x == self.width - 1: nbrs.append(self.image[0, y]) else: nbrs.append(self.image[x + 1, y]) if y == 0: nbrs.append(self.image[x, self.height - 1]) else: nbrs.append(self.image[x, y - 1]) if y == self.height - 1: nbrs.append(self.image[x, 0]) else: nbrs.append(self.image[x, y + 1]) return nbrs def interaction_potentials(self, x, y): nbrs = self.neighbors(x, y) return sum(nbrs) def variational_inference(self, x, y): E = self._J * self.interaction_potentials(x, y) self.image[x, y] = (1 - self._rate) * self.image[x, y ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y]) def denoising(image, iterations, rate, sigma, J=3): ising = IsingModel(image, J=J, rate=rate, sigma=sigma) for i in range(iterations): for x in range(image.shape[0]): for y in range(image.shape[1]): ising.variational_inference(x, y) return ising.image <|reserved_special_token_0|> <|reserved_special_token_1|> import numpy as np from scipy.stats import multivariate_normal from functions.io_data import read_data, write_data np.random.seed(0) class IsingModel(): def __init__(self, image, J, rate, sigma): self.width = image.shape[0] self.height = image.shape[1] self._J = J self._rate = rate self._sigma = sigma self.image, self.logodds = self.presenting_image(image) def presenting_image(self, image): logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov=self._sigma ** 2) - multivariate_normal.logpdf(image.flatten(), mean=-1, cov=self._sigma ** 2) logodds = np.reshape(logodds, image.shape) pr_plus1 = 1 / (1 + np.exp(-1*logodds)) # sigmoid(logodds) # plus 1 -> +1 -> 1 / (1 + exp(logodds)) -> sigmoid(x) = 1 / (1 + exp{x}) return 2 * pr_plus1 - 1, logodds def neighbors(self, x, y): nbrs = [] if x == 0: nbrs.append(self.image[self.width - 1, y]) else: nbrs.append(self.image[x - 1, y]) if x == self.width - 1: nbrs.append(self.image[0, y]) else: nbrs.append(self.image[x + 1, y]) if y == 0: nbrs.append(self.image[x, self.height - 1]) else: nbrs.append(self.image[x, y - 1]) if y == self.height - 1: nbrs.append(self.image[x, 0]) else: nbrs.append(self.image[x, y + 1]) return nbrs def interaction_potentials(self, x, y): nbrs = self.neighbors(x, y) return sum(nbrs) def variational_inference(self, x, y): E = self._J * self.interaction_potentials(x, y) self.image[x, y] = (1 - self._rate) * self.image[x, y] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y]) def denoising(image, iterations, rate, sigma, J=3): ising = IsingModel(image, J=J, rate=rate, sigma=sigma) for i in range(iterations): for x in range(image.shape[0]): for y in range(image.shape[1]): ising.variational_inference(x, y) return ising.image if __name__ == "__main__": for img in range(1,5): print("Denoising for image " + str(img)) data, image = read_data("../a1/"+str(img)+"_noise.txt", True) print(data.shape) print(image.shape) image[image == 0] = -1 image[image == 255] = 1 iterations = 15 J = 3 sigma = 2 rate = 0.5 d_img = denoising(image, iterations=iterations, rate=rate, sigma=sigma) d_img[d_img >= 0] = 255 d_img[d_img < 0] = 0 print(d_img.shape) height = d_img.shape[0] width = d_img.shape[1] counter = 0 for i in range(0, width): for j in range(0, height): data[counter][2] = d_img[j][i][0] counter = counter + 1 write_data(data, "../output/vi/"+str(img)+"_denoise.txt") read_data("../output/vi/"+str(img)+"_denoise.txt", True, save=True, 
save_name="../output/vi/"+str(img)+"_denoise.png") print("Finished writing data. Please check "+str(img)+"_denoise.png \n")
flexible
{ "blob_id": "6aa74826f9ca0803fa8c1d5af1d4cec4980e2ce6", "index": 9064, "step-1": "<mask token>\n\n\nclass IsingModel:\n\n def __init__(self, image, J, rate, sigma):\n self.width = image.shape[0]\n self.height = image.shape[1]\n self._J = J\n self._rate = rate\n self._sigma = sigma\n self.image, self.logodds = self.presenting_image(image)\n <mask token>\n <mask token>\n\n def interaction_potentials(self, x, y):\n nbrs = self.neighbors(x, y)\n return sum(nbrs)\n\n def variational_inference(self, x, y):\n E = self._J * self.interaction_potentials(x, y)\n self.image[x, y] = (1 - self._rate) * self.image[x, y\n ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass IsingModel:\n\n def __init__(self, image, J, rate, sigma):\n self.width = image.shape[0]\n self.height = image.shape[1]\n self._J = J\n self._rate = rate\n self._sigma = sigma\n self.image, self.logodds = self.presenting_image(image)\n <mask token>\n\n def neighbors(self, x, y):\n nbrs = []\n if x == 0:\n nbrs.append(self.image[self.width - 1, y])\n else:\n nbrs.append(self.image[x - 1, y])\n if x == self.width - 1:\n nbrs.append(self.image[0, y])\n else:\n nbrs.append(self.image[x + 1, y])\n if y == 0:\n nbrs.append(self.image[x, self.height - 1])\n else:\n nbrs.append(self.image[x, y - 1])\n if y == self.height - 1:\n nbrs.append(self.image[x, 0])\n else:\n nbrs.append(self.image[x, y + 1])\n return nbrs\n\n def interaction_potentials(self, x, y):\n nbrs = self.neighbors(x, y)\n return sum(nbrs)\n\n def variational_inference(self, x, y):\n E = self._J * self.interaction_potentials(x, y)\n self.image[x, y] = (1 - self._rate) * self.image[x, y\n ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y])\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass IsingModel:\n\n def __init__(self, image, J, rate, sigma):\n self.width = image.shape[0]\n self.height = image.shape[1]\n self._J = J\n self._rate = rate\n self._sigma = sigma\n self.image, self.logodds = self.presenting_image(image)\n\n def presenting_image(self, image):\n logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov=\n self._sigma ** 2) - multivariate_normal.logpdf(image.flatten(),\n mean=-1, cov=self._sigma ** 2)\n logodds = np.reshape(logodds, image.shape)\n pr_plus1 = 1 / (1 + np.exp(-1 * logodds))\n return 2 * pr_plus1 - 1, logodds\n\n def neighbors(self, x, y):\n nbrs = []\n if x == 0:\n nbrs.append(self.image[self.width - 1, y])\n else:\n nbrs.append(self.image[x - 1, y])\n if x == self.width - 1:\n nbrs.append(self.image[0, y])\n else:\n nbrs.append(self.image[x + 1, y])\n if y == 0:\n nbrs.append(self.image[x, self.height - 1])\n else:\n nbrs.append(self.image[x, y - 1])\n if y == self.height - 1:\n nbrs.append(self.image[x, 0])\n else:\n nbrs.append(self.image[x, y + 1])\n return nbrs\n\n def interaction_potentials(self, x, y):\n nbrs = self.neighbors(x, y)\n return sum(nbrs)\n\n def variational_inference(self, x, y):\n E = self._J * self.interaction_potentials(x, y)\n self.image[x, y] = (1 - self._rate) * self.image[x, y\n ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y])\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass IsingModel:\n\n def __init__(self, image, J, rate, sigma):\n self.width = image.shape[0]\n self.height = image.shape[1]\n self._J = J\n self._rate = rate\n self._sigma = sigma\n self.image, self.logodds = self.presenting_image(image)\n\n def presenting_image(self, image):\n logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov=\n self._sigma ** 2) - 
multivariate_normal.logpdf(image.flatten(),\n mean=-1, cov=self._sigma ** 2)\n logodds = np.reshape(logodds, image.shape)\n pr_plus1 = 1 / (1 + np.exp(-1 * logodds))\n return 2 * pr_plus1 - 1, logodds\n\n def neighbors(self, x, y):\n nbrs = []\n if x == 0:\n nbrs.append(self.image[self.width - 1, y])\n else:\n nbrs.append(self.image[x - 1, y])\n if x == self.width - 1:\n nbrs.append(self.image[0, y])\n else:\n nbrs.append(self.image[x + 1, y])\n if y == 0:\n nbrs.append(self.image[x, self.height - 1])\n else:\n nbrs.append(self.image[x, y - 1])\n if y == self.height - 1:\n nbrs.append(self.image[x, 0])\n else:\n nbrs.append(self.image[x, y + 1])\n return nbrs\n\n def interaction_potentials(self, x, y):\n nbrs = self.neighbors(x, y)\n return sum(nbrs)\n\n def variational_inference(self, x, y):\n E = self._J * self.interaction_potentials(x, y)\n self.image[x, y] = (1 - self._rate) * self.image[x, y\n ] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y])\n\n\ndef denoising(image, iterations, rate, sigma, J=3):\n ising = IsingModel(image, J=J, rate=rate, sigma=sigma)\n for i in range(iterations):\n for x in range(image.shape[0]):\n for y in range(image.shape[1]):\n ising.variational_inference(x, y)\n return ising.image\n\n\n<mask token>\n", "step-5": "import numpy as np\nfrom scipy.stats import multivariate_normal\nfrom functions.io_data import read_data, write_data\n\nnp.random.seed(0)\n\nclass IsingModel():\n\n def __init__(self, image, J, rate, sigma):\n self.width = image.shape[0]\n self.height = image.shape[1]\n self._J = J\n self._rate = rate\n self._sigma = sigma\n\n self.image, self.logodds = self.presenting_image(image)\n\n def presenting_image(self, image):\n logodds = multivariate_normal.logpdf(image.flatten(), mean=+1, cov=self._sigma ** 2) - multivariate_normal.logpdf(image.flatten(), mean=-1, cov=self._sigma ** 2)\n logodds = np.reshape(logodds, image.shape)\n pr_plus1 = 1 / (1 + np.exp(-1*logodds)) # sigmoid(logodds) # plus 1 -> +1 -> 1 / (1 + exp(logodds)) -> sigmoid(x) = 1 / (1 + exp{x})\n return 2 * pr_plus1 - 1, logodds\n\n def neighbors(self, x, y):\n nbrs = []\n\n if x == 0:\n nbrs.append(self.image[self.width - 1, y])\n else:\n nbrs.append(self.image[x - 1, y])\n\n if x == self.width - 1:\n nbrs.append(self.image[0, y])\n else:\n nbrs.append(self.image[x + 1, y])\n\n if y == 0:\n nbrs.append(self.image[x, self.height - 1])\n else:\n nbrs.append(self.image[x, y - 1])\n\n if y == self.height - 1:\n nbrs.append(self.image[x, 0])\n else:\n nbrs.append(self.image[x, y + 1])\n\n return nbrs\n\n def interaction_potentials(self, x, y):\n nbrs = self.neighbors(x, y)\n return sum(nbrs)\n\n def variational_inference(self, x, y):\n E = self._J * self.interaction_potentials(x, y)\n self.image[x, y] = (1 - self._rate) * self.image[x, y] + self._rate * np.tanh(E + 0.5 * self.logodds[x, y])\n\n\ndef denoising(image, iterations, rate, sigma, J=3):\n ising = IsingModel(image, J=J, rate=rate, sigma=sigma)\n\n for i in range(iterations):\n for x in range(image.shape[0]):\n for y in range(image.shape[1]):\n ising.variational_inference(x, y)\n\n return ising.image\n\nif __name__ == \"__main__\":\n for img in range(1,5):\n print(\"Denoising for image \" + str(img))\n data, image = read_data(\"../a1/\"+str(img)+\"_noise.txt\", True)\n\n print(data.shape)\n print(image.shape)\n\n image[image == 0] = -1\n image[image == 255] = 1\n\n iterations = 15\n J = 3\n sigma = 2\n rate = 0.5\n\n d_img = denoising(image, iterations=iterations, rate=rate, sigma=sigma)\n\n d_img[d_img >= 0] = 255\n d_img[d_img 
< 0] = 0\n\n print(d_img.shape)\n height = d_img.shape[0]\n width = d_img.shape[1]\n counter = 0\n\n for i in range(0, width):\n for j in range(0, height):\n data[counter][2] = d_img[j][i][0]\n counter = counter + 1\n\n write_data(data, \"../output/vi/\"+str(img)+\"_denoise.txt\")\n read_data(\"../output/vi/\"+str(img)+\"_denoise.txt\", True, save=True, save_name=\"../output/vi/\"+str(img)+\"_denoise.png\")\n print(\"Finished writing data. Please check \"+str(img)+\"_denoise.png \\n\")", "step-ids": [ 4, 5, 6, 7, 10 ] }
[ 4, 5, 6, 7, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> class Solution(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> class Solution(object): def minCostClimbingStairs(self, cost): """ :type cost: List[int] :rtype: int """ result = [(0) for _ in range(len(cost))] result[0] = cost[0] result[1] = cost[1] for j in range(2, len(result)): result[j] = min(result[j - 1], result[j - 2]) + cost[j] return min(result[-2], result[-1]) <|reserved_special_token_0|> <|reserved_special_token_1|> class Solution(object): def minCostClimbingStairs(self, cost): """ :type cost: List[int] :rtype: int """ result = [(0) for _ in range(len(cost))] result[0] = cost[0] result[1] = cost[1] for j in range(2, len(result)): result[j] = min(result[j - 1], result[j - 2]) + cost[j] return min(result[-2], result[-1]) if __name__ == '__main__': solution = Solution() costs = [10, 15, 20] res = solution.minCostClimbingStairs(costs) print(res) <|reserved_special_token_1|> # 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。 # # 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。 # # 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。 # # 示例 1: # # 输入: cost = [10, 15, 20] # 输出: 15 # 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。 # # # 示例 2: # # 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1] # 输出: 6 # 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。 # # # 注意: # # # cost 的长度将会在 [2, 1000]。 # 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。 # # Related Topics 数组 动态规划 # leetcode submit region begin(Prohibit modification and deletion) class Solution(object): def minCostClimbingStairs(self, cost): """ :type cost: List[int] :rtype: int """ # f1 = f2 = 0 # for x in reversed(cost): # f1, f2 = x + min(f1, f2), f1 # return min(f1, f2) result = [0 for _ in range(len(cost))] result[0] = cost[0] result[1] = cost[1] for j in range(2, len(result)): result[j] = min(result[j - 1], result[j - 2]) + cost[j] return min(result[-2], result[-1]) if __name__ == '__main__': solution = Solution() costs = [10, 15, 20] res = solution.minCostClimbingStairs(costs) print(res)
flexible
{ "blob_id": "38363316cc9a8419a528bb78b9ad03682e24172d", "index": 9823, "step-1": "<mask token>\n", "step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n", "step-3": "class Solution(object):\n\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n result = [(0) for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\n<mask token>\n", "step-4": "class Solution(object):\n\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n result = [(0) for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\nif __name__ == '__main__':\n solution = Solution()\n costs = [10, 15, 20]\n res = solution.minCostClimbingStairs(costs)\n print(res)\n", "step-5": "# 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。\n#\n# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。\n#\n# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。\n#\n# 示例 1:\n#\n# 输入: cost = [10, 15, 20]\n# 输出: 15\n# 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。\n#\n#\n# 示例 2:\n#\n# 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\n# 输出: 6\n# 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。\n#\n#\n# 注意:\n#\n#\n# cost 的长度将会在 [2, 1000]。\n# 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。\n#\n# Related Topics 数组 动态规划\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n\n # f1 = f2 = 0\n # for x in reversed(cost):\n # f1, f2 = x + min(f1, f2), f1\n # return min(f1, f2)\n\n result = [0 for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\n\n\nif __name__ == '__main__':\n solution = Solution()\n costs = [10, 15, 20]\n res = solution.minCostClimbingStairs(costs)\n print(res)\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class StatusParser: def __init__(self): self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10)) self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1,)) self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1,)) self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1,)) self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np .int32, shape=(4,)) class Parser: def __init__(self, filename): self.filename = filename self.last_update = -1 def check_update(self): latest_update = os.path.getmtime(self.filename) if latest_update > self.last_update: self.last_update = latest_update self.parse() return True return False def parse(self): score_re = ( 'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)' ) train_re = ( 'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)' ) datasize_re = ( 'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)' ) queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*' self.data = defaultdict(list) size = 0 filled = 0 rm_since_last_game = 0 with open(self.filename) as f: lc_avg_tmp = [] sc_avg_tmp = [] data_accum = 0 training = False for line in f.readlines(): match_score_re = re.search(score_re, line) match_train_re = re.search(train_re, line) match_datasize_re = re.search(datasize_re, line) match_queue_re = re.search(queue_re, line) if match_score_re: d = match_score_re.groupdict() lc = int(d['lines']) sc = int(d['score']) self.data['line_cleared'].append(lc) self.data['score'].append(sc) self.data['data_accumulated'].append(data_accum) lc_avg_tmp.append(lc) sc_avg_tmp.append(sc) rm_since_last_game = 0 elif match_train_re: d = match_train_re.groupdict() self.data['training_loss'].append(float(d['t_loss'])) self.data['validation_loss'].append(float(d['v_loss'])) if d['v_loss_err'] == 'nan': self.data['validation_loss_err'].append(0) else: self.data['validation_loss_err'].append(float(d[ 'v_loss_err'])) self.data['g_norm'].append(float(d['g_norm'])) elif match_datasize_re: d = match_datasize_re.groupdict() tsize = int(d['tsize']) vsize = int(d['vsize']) data_accum += tsize + vsize elif match_queue_re: d = match_queue_re.groupdict() filled = int(d['filled']) size = int(d['size']) elif 'REMOVING UNUSED' in line: rm_since_last_game += 1 elif 'proceed to training' in line: training = True if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) lc_avg_tmp.clear() elif self.data['line_cleared_per_train']: self.data['line_cleared_per_train'].append(self. 
data['line_cleared_per_train'][-1]) else: self.data['line_cleared_per_train'].append((0, 0)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) sc_avg_tmp.clear() elif self.data['score_per_train']: self.data['score_per_train'].append(self.data[ 'score_per_train'][-1]) else: self.data['score_per_train'].append((0, 0)) elif 'Training complete' in line: training = False if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) if not training: flocal = './model_checkpoint' ftarget = '../pytorch_model/model_checkpoint' ex_local = os.path.isfile(flocal) ex_target = os.path.isfile(ftarget) if ex_target and (ex_local and not filecmp.cmp(flocal, ftarget) or not ex_local): copyfile(ftarget, flocal) self.data['filled'] = filled self.data['size'] = size self.data['rm_since_last_game'] = rm_since_last_game class ModelParser: def __init__(self, distributional=True): self.last_update = -1 self.data = {} self.distributional = distributional def check_update(self): flocal = './model_checkpoint' if os.path.isfile(flocal): latest = os.path.getmtime(flocal) if latest > self.last_update: print('New model found, updating...', flush=True) self.last_update = latest state = torch.load(flocal, map_location=torch.device('cpu')) model_state = state['model_state_dict'] self.parse_state(model_state) return True return False def parse(self, model): self.parse_state(model.state_dict()) def parse_state(self, model_state): self.data = {} for k, v in model_state.items(): if 'weight' in k: k = k.replace('.weight', '') k = k.replace('seq.', '') self.data[k] = v.cpu().numpy().ravel() <|reserved_special_token_1|> <|reserved_special_token_0|> class BoardParser: <|reserved_special_token_0|> <|reserved_special_token_0|> class StatusParser: def __init__(self): self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10)) self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1,)) self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1,)) self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1,)) self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np .int32, shape=(4,)) class Parser: def __init__(self, filename): self.filename = filename self.last_update = -1 def check_update(self): latest_update = os.path.getmtime(self.filename) if latest_update > self.last_update: self.last_update = latest_update self.parse() return True return False def parse(self): score_re = ( 'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)' ) train_re = ( 'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)' ) datasize_re = ( 'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)' ) queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*' self.data = defaultdict(list) size = 0 filled = 0 rm_since_last_game = 0 with open(self.filename) as f: lc_avg_tmp = [] sc_avg_tmp = [] data_accum = 0 training = False for line in f.readlines(): match_score_re = re.search(score_re, line) match_train_re = re.search(train_re, line) 
match_datasize_re = re.search(datasize_re, line) match_queue_re = re.search(queue_re, line) if match_score_re: d = match_score_re.groupdict() lc = int(d['lines']) sc = int(d['score']) self.data['line_cleared'].append(lc) self.data['score'].append(sc) self.data['data_accumulated'].append(data_accum) lc_avg_tmp.append(lc) sc_avg_tmp.append(sc) rm_since_last_game = 0 elif match_train_re: d = match_train_re.groupdict() self.data['training_loss'].append(float(d['t_loss'])) self.data['validation_loss'].append(float(d['v_loss'])) if d['v_loss_err'] == 'nan': self.data['validation_loss_err'].append(0) else: self.data['validation_loss_err'].append(float(d[ 'v_loss_err'])) self.data['g_norm'].append(float(d['g_norm'])) elif match_datasize_re: d = match_datasize_re.groupdict() tsize = int(d['tsize']) vsize = int(d['vsize']) data_accum += tsize + vsize elif match_queue_re: d = match_queue_re.groupdict() filled = int(d['filled']) size = int(d['size']) elif 'REMOVING UNUSED' in line: rm_since_last_game += 1 elif 'proceed to training' in line: training = True if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) lc_avg_tmp.clear() elif self.data['line_cleared_per_train']: self.data['line_cleared_per_train'].append(self. data['line_cleared_per_train'][-1]) else: self.data['line_cleared_per_train'].append((0, 0)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) sc_avg_tmp.clear() elif self.data['score_per_train']: self.data['score_per_train'].append(self.data[ 'score_per_train'][-1]) else: self.data['score_per_train'].append((0, 0)) elif 'Training complete' in line: training = False if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) if not training: flocal = './model_checkpoint' ftarget = '../pytorch_model/model_checkpoint' ex_local = os.path.isfile(flocal) ex_target = os.path.isfile(ftarget) if ex_target and (ex_local and not filecmp.cmp(flocal, ftarget) or not ex_local): copyfile(ftarget, flocal) self.data['filled'] = filled self.data['size'] = size self.data['rm_since_last_game'] = rm_since_last_game class ModelParser: def __init__(self, distributional=True): self.last_update = -1 self.data = {} self.distributional = distributional def check_update(self): flocal = './model_checkpoint' if os.path.isfile(flocal): latest = os.path.getmtime(flocal) if latest > self.last_update: print('New model found, updating...', flush=True) self.last_update = latest state = torch.load(flocal, map_location=torch.device('cpu')) model_state = state['model_state_dict'] self.parse_state(model_state) return True return False def parse(self, model): self.parse_state(model.state_dict()) def parse_state(self, model_state): self.data = {} for k, v in model_state.items(): if 'weight' in k: k = k.replace('.weight', '') k = k.replace('seq.', '') self.data[k] = v.cpu().numpy().ravel() <|reserved_special_token_1|> <|reserved_special_token_0|> class BoardParser: <|reserved_special_token_0|> def update(self): s = self.file.read() if len(s) == 200: self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10) self.file.seek(0) class StatusParser: def __init__(self): self.board = np.memmap('../tmp/board', 
mode='r', dtype=np.int8, shape=(20, 10)) self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1,)) self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1,)) self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1,)) self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np .int32, shape=(4,)) class Parser: def __init__(self, filename): self.filename = filename self.last_update = -1 def check_update(self): latest_update = os.path.getmtime(self.filename) if latest_update > self.last_update: self.last_update = latest_update self.parse() return True return False def parse(self): score_re = ( 'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)' ) train_re = ( 'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)' ) datasize_re = ( 'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)' ) queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*' self.data = defaultdict(list) size = 0 filled = 0 rm_since_last_game = 0 with open(self.filename) as f: lc_avg_tmp = [] sc_avg_tmp = [] data_accum = 0 training = False for line in f.readlines(): match_score_re = re.search(score_re, line) match_train_re = re.search(train_re, line) match_datasize_re = re.search(datasize_re, line) match_queue_re = re.search(queue_re, line) if match_score_re: d = match_score_re.groupdict() lc = int(d['lines']) sc = int(d['score']) self.data['line_cleared'].append(lc) self.data['score'].append(sc) self.data['data_accumulated'].append(data_accum) lc_avg_tmp.append(lc) sc_avg_tmp.append(sc) rm_since_last_game = 0 elif match_train_re: d = match_train_re.groupdict() self.data['training_loss'].append(float(d['t_loss'])) self.data['validation_loss'].append(float(d['v_loss'])) if d['v_loss_err'] == 'nan': self.data['validation_loss_err'].append(0) else: self.data['validation_loss_err'].append(float(d[ 'v_loss_err'])) self.data['g_norm'].append(float(d['g_norm'])) elif match_datasize_re: d = match_datasize_re.groupdict() tsize = int(d['tsize']) vsize = int(d['vsize']) data_accum += tsize + vsize elif match_queue_re: d = match_queue_re.groupdict() filled = int(d['filled']) size = int(d['size']) elif 'REMOVING UNUSED' in line: rm_since_last_game += 1 elif 'proceed to training' in line: training = True if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) lc_avg_tmp.clear() elif self.data['line_cleared_per_train']: self.data['line_cleared_per_train'].append(self. 
data['line_cleared_per_train'][-1]) else: self.data['line_cleared_per_train'].append((0, 0)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) sc_avg_tmp.clear() elif self.data['score_per_train']: self.data['score_per_train'].append(self.data[ 'score_per_train'][-1]) else: self.data['score_per_train'].append((0, 0)) elif 'Training complete' in line: training = False if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) if not training: flocal = './model_checkpoint' ftarget = '../pytorch_model/model_checkpoint' ex_local = os.path.isfile(flocal) ex_target = os.path.isfile(ftarget) if ex_target and (ex_local and not filecmp.cmp(flocal, ftarget) or not ex_local): copyfile(ftarget, flocal) self.data['filled'] = filled self.data['size'] = size self.data['rm_since_last_game'] = rm_since_last_game class ModelParser: def __init__(self, distributional=True): self.last_update = -1 self.data = {} self.distributional = distributional def check_update(self): flocal = './model_checkpoint' if os.path.isfile(flocal): latest = os.path.getmtime(flocal) if latest > self.last_update: print('New model found, updating...', flush=True) self.last_update = latest state = torch.load(flocal, map_location=torch.device('cpu')) model_state = state['model_state_dict'] self.parse_state(model_state) return True return False def parse(self, model): self.parse_state(model.state_dict()) def parse_state(self, model_state): self.data = {} for k, v in model_state.items(): if 'weight' in k: k = k.replace('.weight', '') k = k.replace('seq.', '') self.data[k] = v.cpu().numpy().ravel() <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append('../') class BoardParser: def __init__(self): self.file = open('../board_output', 'rb') self.data = None def update(self): s = self.file.read() if len(s) == 200: self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10) self.file.seek(0) class StatusParser: def __init__(self): self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10)) self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1,)) self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1,)) self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1,)) self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np .int32, shape=(4,)) class Parser: def __init__(self, filename): self.filename = filename self.last_update = -1 def check_update(self): latest_update = os.path.getmtime(self.filename) if latest_update > self.last_update: self.last_update = latest_update self.parse() return True return False def parse(self): score_re = ( 'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)' ) train_re = ( 'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)' ) datasize_re = ( 'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)' ) queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*' self.data = defaultdict(list) size = 0 filled = 0 rm_since_last_game = 0 with open(self.filename) as f: 
lc_avg_tmp = [] sc_avg_tmp = [] data_accum = 0 training = False for line in f.readlines(): match_score_re = re.search(score_re, line) match_train_re = re.search(train_re, line) match_datasize_re = re.search(datasize_re, line) match_queue_re = re.search(queue_re, line) if match_score_re: d = match_score_re.groupdict() lc = int(d['lines']) sc = int(d['score']) self.data['line_cleared'].append(lc) self.data['score'].append(sc) self.data['data_accumulated'].append(data_accum) lc_avg_tmp.append(lc) sc_avg_tmp.append(sc) rm_since_last_game = 0 elif match_train_re: d = match_train_re.groupdict() self.data['training_loss'].append(float(d['t_loss'])) self.data['validation_loss'].append(float(d['v_loss'])) if d['v_loss_err'] == 'nan': self.data['validation_loss_err'].append(0) else: self.data['validation_loss_err'].append(float(d[ 'v_loss_err'])) self.data['g_norm'].append(float(d['g_norm'])) elif match_datasize_re: d = match_datasize_re.groupdict() tsize = int(d['tsize']) vsize = int(d['vsize']) data_accum += tsize + vsize elif match_queue_re: d = match_queue_re.groupdict() filled = int(d['filled']) size = int(d['size']) elif 'REMOVING UNUSED' in line: rm_since_last_game += 1 elif 'proceed to training' in line: training = True if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) lc_avg_tmp.clear() elif self.data['line_cleared_per_train']: self.data['line_cleared_per_train'].append(self. data['line_cleared_per_train'][-1]) else: self.data['line_cleared_per_train'].append((0, 0)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) sc_avg_tmp.clear() elif self.data['score_per_train']: self.data['score_per_train'].append(self.data[ 'score_per_train'][-1]) else: self.data['score_per_train'].append((0, 0)) elif 'Training complete' in line: training = False if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) if not training: flocal = './model_checkpoint' ftarget = '../pytorch_model/model_checkpoint' ex_local = os.path.isfile(flocal) ex_target = os.path.isfile(ftarget) if ex_target and (ex_local and not filecmp.cmp(flocal, ftarget) or not ex_local): copyfile(ftarget, flocal) self.data['filled'] = filled self.data['size'] = size self.data['rm_since_last_game'] = rm_since_last_game class ModelParser: def __init__(self, distributional=True): self.last_update = -1 self.data = {} self.distributional = distributional def check_update(self): flocal = './model_checkpoint' if os.path.isfile(flocal): latest = os.path.getmtime(flocal) if latest > self.last_update: print('New model found, updating...', flush=True) self.last_update = latest state = torch.load(flocal, map_location=torch.device('cpu')) model_state = state['model_state_dict'] self.parse_state(model_state) return True return False def parse(self, model): self.parse_state(model.state_dict()) def parse_state(self, model_state): self.data = {} for k, v in model_state.items(): if 'weight' in k: k = k.replace('.weight', '') k = k.replace('seq.', '') self.data[k] = v.cpu().numpy().ravel() <|reserved_special_token_1|> import torch import re import sys import os import shutil import filecmp import numpy as np from collections 
import defaultdict from shutil import copyfile sys.path.append('../') class BoardParser: def __init__(self): self.file = open('../board_output', 'rb') self.data = None def update(self): s = self.file.read() if len(s) == 200: self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10) self.file.seek(0) class StatusParser: def __init__(self): self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10)) self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, )) self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, )) self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, )) self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, )) class Parser: def __init__(self, filename): self.filename = filename self.last_update = -1 def check_update(self): latest_update = os.path.getmtime(self.filename) if latest_update > self.last_update: self.last_update = latest_update self.parse() return True return False def parse(self): score_re = 'Episode:\s*(?P<episode>\d*)\s*' \ 'Score:\s*(?P<score>\d*)\s*' \ 'Lines Cleared:\s*(?P<lines>\d*)' train_re = 'Iteration:\s*(?P<iter>\d*)\s*' \ 'training loss:\s*(?P<t_loss>\d*\.\d*)\s*' \ 'validation loss:\s*(?P<v_loss>\d*\.\d*)±\s*(?P<v_loss_err>\d*\.\d*|nan)\s*' \ 'gradient norm:\s*(?P<g_norm>\d*\.\d*)' datasize_re = 'Training data size:\s*(?P<tsize>\d*)\s*' \ 'Validation data size:\s*(?P<vsize>\d*)' queue_re = 'Memory usage: (?P<filled>\d*) / (?P<size>\d*).*' self.data = defaultdict(list) size = 0 filled = 0 rm_since_last_game = 0 with open(self.filename) as f: lc_avg_tmp = [] sc_avg_tmp = [] data_accum = 0 training = False for line in f.readlines(): match_score_re = re.search(score_re, line) match_train_re = re.search(train_re, line) match_datasize_re = re.search(datasize_re, line) match_queue_re = re.search(queue_re, line) if match_score_re: d = match_score_re.groupdict() lc = int(d['lines']) sc = int(d['score']) self.data['line_cleared'].append(lc) self.data['score'].append(sc) self.data['data_accumulated'].append(data_accum) lc_avg_tmp.append(lc) sc_avg_tmp.append(sc) rm_since_last_game = 0 elif match_train_re: d = match_train_re.groupdict() self.data['training_loss'].append(float(d['t_loss'])) self.data['validation_loss'].append(float(d['v_loss'])) if d['v_loss_err'] == 'nan': self.data['validation_loss_err'].append(0) else: self.data['validation_loss_err'].append(float(d['v_loss_err'])) self.data['g_norm'].append(float(d['g_norm'])) #print(d['g_norm']) elif match_datasize_re: d = match_datasize_re.groupdict() tsize = int(d['tsize']) vsize = int(d['vsize']) data_accum += (tsize + vsize) elif match_queue_re: d = match_queue_re.groupdict() filled = int(d['filled']) size = int(d['size']) elif 'REMOVING UNUSED' in line: rm_since_last_game += 1 elif 'proceed to training' in line: training = True if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) lc_avg_tmp.clear() else: if self.data['line_cleared_per_train']: self.data['line_cleared_per_train'].append( self.data['line_cleared_per_train'][-1]) else: self.data['line_cleared_per_train'].append((0, 0)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) sc_avg_tmp.clear() else: if self.data['score_per_train']: self.data['score_per_train'].append( self.data['score_per_train'][-1]) else: 
self.data['score_per_train'].append((0, 0)) elif 'Training complete' in line: training = False if lc_avg_tmp: mean = np.average(lc_avg_tmp) std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp)) self.data['line_cleared_per_train'].append((mean, std)) if sc_avg_tmp: mean = np.average(sc_avg_tmp) std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp)) self.data['score_per_train'].append((mean, std)) if not training: flocal = './model_checkpoint' ftarget = '../pytorch_model/model_checkpoint' ex_local = os.path.isfile(flocal) ex_target = os.path.isfile(ftarget) if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local): copyfile(ftarget, flocal) self.data['filled'] = filled self.data['size'] = size self.data['rm_since_last_game'] = rm_since_last_game class ModelParser: def __init__(self, distributional=True): self.last_update = -1 self.data = {} self.distributional = distributional def check_update(self): flocal = './model_checkpoint' if os.path.isfile(flocal): latest = os.path.getmtime(flocal) if latest > self.last_update: print('New model found, updating...', flush=True) self.last_update = latest state = torch.load(flocal, map_location=torch.device('cpu')) model_state = state['model_state_dict'] self.parse_state(model_state) return True return False def parse(self, model): self.parse_state(model.state_dict()) def parse_state(self, model_state): self.data = {} for k, v in model_state.items(): if 'weight' in k: k = k.replace('.weight', '') k = k.replace('seq.', '') self.data[k] = v.cpu().numpy().ravel()
flexible
{ "blob_id": "3668e8009dca4ea261bdfbd325331c338fdac5a9", "index": 627, "step-1": "<mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 
'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-2": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n <mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if 
match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-3": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n 
self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n 
elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-4": "<mask token>\nsys.path.append('../')\n\n\nclass BoardParser:\n\n def __init__(self):\n self.file = open('../board_output', 'rb')\n self.data = None\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n 
match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-5": "import torch\nimport re\nimport sys\nimport 
os\nimport shutil\nimport filecmp\nimport numpy as np\nfrom collections import defaultdict\nfrom shutil import copyfile\n\nsys.path.append('../')\n\n\nclass BoardParser:\n def __init__(self):\n\n self.file = open('../board_output', 'rb')\n\n self.data = None\n\n def update(self):\n\n s = self.file.read()\n\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n\n self.file.seek(0)\n\n\nclass StatusParser:\n def __init__(self):\n\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))\n\n\nclass Parser:\n def __init__(self, filename):\n\n self.filename = filename\n\n self.last_update = -1\n\n def check_update(self):\n\n latest_update = os.path.getmtime(self.filename)\n\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = 'Episode:\\s*(?P<episode>\\d*)\\s*' \\\n 'Score:\\s*(?P<score>\\d*)\\s*' \\\n 'Lines Cleared:\\s*(?P<lines>\\d*)'\n train_re = 'Iteration:\\s*(?P<iter>\\d*)\\s*' \\\n 'training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*' \\\n 'validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*' \\\n 'gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'\n datasize_re = 'Training data size:\\s*(?P<tsize>\\d*)\\s*' \\\n 'Validation data size:\\s*(?P<vsize>\\d*)'\n queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'\n\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d['v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n #print(d['g_norm'])\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += (tsize + vsize)\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n else:\n if self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(\n self.data['line_cleared_per_train'][-1])\n else:\n 
self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n else:\n if self.data['score_per_train']:\n self.data['score_per_train'].append(\n self.data['score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n\n if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):\n copyfile(ftarget, flocal)\n\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n def __init__(self, distributional=True):\n\n self.last_update = -1\n\n self.data = {}\n\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-ids": [ 11, 12, 13, 15, 17 ] }
[ 11, 12, 13, 15, 17 ]
FILE = "Luke" NAME = "Luke Walker" NATIONALITY = "American" CLASS = "Manipulator" WEAPON = "" BIRTH = "" BIRTH_LOCATION = "" LETTER = "W" RECRUITMENT_ORDER = 10 SUMMARY = "" ABILITIES = "" BACKSTORY = "" HIGHLIGHTS = "" SUMMONS = ("Tonberry", "Grimnir", "Griever", "Starlet")
normal
{ "blob_id": "fa3ab879541c04e278317b11dd79e6e1b4319536", "index": 7586, "step-1": "<mask token>\n", "step-2": "FILE = 'Luke'\nNAME = 'Luke Walker'\nNATIONALITY = 'American'\nCLASS = 'Manipulator'\nWEAPON = ''\nBIRTH = ''\nBIRTH_LOCATION = ''\nLETTER = 'W'\nRECRUITMENT_ORDER = 10\nSUMMARY = ''\nABILITIES = ''\nBACKSTORY = ''\nHIGHLIGHTS = ''\nSUMMONS = 'Tonberry', 'Grimnir', 'Griever', 'Starlet'\n", "step-3": "FILE = \"Luke\"\n\nNAME = \"Luke Walker\"\n\nNATIONALITY = \"American\"\n\nCLASS = \"Manipulator\"\n\nWEAPON = \"\"\n\nBIRTH = \"\"\n\nBIRTH_LOCATION = \"\"\n\nLETTER = \"W\"\n\nRECRUITMENT_ORDER = 10\n\nSUMMARY = \"\"\n\nABILITIES = \"\"\n\nBACKSTORY = \"\"\n\nHIGHLIGHTS = \"\"\n\nSUMMONS = (\"Tonberry\", \"Grimnir\", \"Griever\", \"Starlet\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from rest_framework import generics from animals.models import Location from animals.serializers import LocationSerializer class LocationList(generics.ListCreateAPIView): queryset = Location.objects.all() serializer_class = LocationSerializer name = 'location-list' class LocationDetail(generics.RetrieveUpdateDestroyAPIView): queryset = Location.objects.all() serializer_class = LocationSerializer name = 'location'
normal
{ "blob_id": "245e407c9e92b3ac34389a48fcef4fc1b349ea18", "index": 8252, "step-1": "<mask token>\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n", "step-2": "<mask token>\n\n\nclass LocationList(generics.ListCreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n", "step-3": "<mask token>\n\n\nclass LocationList(generics.ListCreateAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location-list'\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n", "step-4": "from rest_framework import generics\nfrom animals.models import Location\nfrom animals.serializers import LocationSerializer\n\n\nclass LocationList(generics.ListCreateAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location-list'\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]