code: string (lengths 13 to 1.2M)
order_type: string (1 distinct value)
original_example: dict
step_ids: list (lengths 1 to 5)
from psycopg2 import extras as ex
import psycopg2 as pg
import json
import datetime
import os
from functools import reduce

data_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务', 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime': '2020年09月02日 19:20', 'obtBidTime': '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\xa0\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)', 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室', 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅', 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone': '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone': '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文', 'agentPhone': '18690293446'},
             {'projectName': '旅顺口医疗区医用氧气管道检修采购项目', 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院', 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime': '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\xa0\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)', 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone': '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号', 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName': '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix': '{"2.报价书氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44", "3.货物指标及要求氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1"}'}
             ]

dict_tmp = {}
values_list = []
result = []


def processJson(dic):
    dicobj = json.loads(dic)
    print(dicobj)
    for k, v in dicobj.items():
        dict_tmp = {}
        dict_tmp["file_name"] = k
        dict_tmp["urls"] = v
        print(k)
        print(v)
        result.append(dict_tmp)
        # dict_tmp.clear()
    return result


def procesV():
    for i in data_list:
        if "appendix" in i.keys():
            appendix = i["appendix"]
            if appendix != "":
                fj = processJson(i["appendix"])
                print(fj)
                fjs = json.dumps(fj, ensure_ascii=False)
                values_list.append(("testtest", fjs))


def prosql():
    # values 后面直接%s
    hostname = '172.18.11.26'
    username = 'postgres'
    password = 'postgres_cnhis@#$'
    database = 'ai'
    conn = pg.connect(database=database, user=username, password=password, host=hostname, port="5432")
    cursor = conn.cursor()
    procesV()
    sql = '''insert into ho_sysnc_third_customer_data("purchased_project_name","fj_json")
          values %s
          '''
    # 其中函数中的page_size参数默认为100,表示每个statement包含的最大条目数,
    # 如果传过来的argslist长度大于page_size,则该函数最多执行len(argslist)/page_size + 1次。
    ex.execute_values(cursor, sql, values_list, page_size=10000)
    conn.commit()

    conn.close()
    cursor.close()


if __name__ == '__main__':
    prosql()
    # procesV()
normal
{ "blob_id": "e9af8f7830be7db3ca57b0a24de48ef7fcb08d6c", "index": 8453, "step-1": "<mask token>\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n", "step-3": "<mask token>\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务',\n 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime':\n '2020年09月02日 19:20', 'obtBidTime':\n '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)'\n , 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室',\n 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅',\n 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone':\n '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone':\n '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文',\n 'agentPhone': '18690293446'}, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目',\n 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院',\n 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime':\n '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)'\n , 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone':\n '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号',\n 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName':\n '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone':\n '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix':\n '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'\n }]\ndict_tmp = {}\nvalues_list = []\nresult = []\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = 
json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n", "step-4": "from psycopg2 import extras as ex\nimport psycopg2 as pg\nimport json\nimport datetime\nimport os\nfrom functools import reduce\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务',\n 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime':\n '2020年09月02日 19:20', 'obtBidTime':\n '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)'\n , 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室',\n 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅',\n 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone':\n '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone':\n '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文',\n 'agentPhone': '18690293446'}, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目',\n 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院',\n 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime':\n '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)'\n , 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone':\n '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号',\n 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName':\n '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone':\n '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix':\n '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'\n }]\ndict_tmp = {}\nvalues_list = []\nresult = []\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n", "step-5": "from psycopg2 import extras as ex\nimport psycopg2 as pg\nimport json\nimport datetime\nimport os\nfrom functools import reduce\n\n\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务', 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime': '2020年09月02日 
19:20', 'obtBidTime': '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)', 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室', 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅', 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone': '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone': '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文', 'agentPhone': '18690293446'}\n , {'projectName': '旅顺口医疗区医用氧气管道检修采购项目', 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院', 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime': '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)', 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone': '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号', 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName': '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix': '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'}\n ]\n\n\ndict_tmp={}\nvalues_list = []\nresult = []\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k,v in dicobj.items():\n dict_tmp = {}\n dict_tmp[\"file_name\"] = k\n dict_tmp[\"urls\"] =v\n print(k)\n print(v)\n result.append(dict_tmp)\n # dict_tmp.clear()\n return result\n\ndef procesV():\n for i in data_list:\n if \"appendix\" in i.keys():\n appendix = i[\"appendix\"]\n if appendix != \"\":\n fj = processJson(i[\"appendix\"])\n print(fj)\n fjs = json.dumps(fj,ensure_ascii=False)\n values_list.append((\"testtest\",fjs))\n\ndef prosql():\n # values 后面直接%s\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password, host=hostname, port=\"5432\")\n cursor = conn.cursor()\n procesV()\n sql = '''insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n '''\n # 其中函数中的page_size参数默认为100,表示每个statement包含的最大条目数,\n # 如果传过来的argslist长度大于page_size,则该函数最多执行len(argslist)/page_size + 1次。\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n\n conn.close()\n cursor.close()\n\n\n\n\nif __name__ =='__main__':\n prosql()\n # procesV()\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
from projects.models import Project
from django.db import connection
from .utils import namedtuplefetchall
from django.http import JsonResponse
from django.contrib import messages
import json
from django.views.decorators.csrf import csrf_exempt

from .utils import send_mail

from DBMS import settings

from passlib.hash import pbkdf2_sha256 as encrypto

# Create your views here.

@login_required
@csrf_exempt
def social(request):
    if request.method == "POST":
        data = request.POST
        project_id = int(json.loads(data.get('projid')))
        head = data.get('head')
        head = json.loads(head)
        subhead = json.loads(data.get('subh'))
        content = json.loads(data.get('cont'))
        obtained = json.loads(data.get('pass'))
        with connection.cursor() as curr:
            curr.execute("SELECT manager_id,customer_id FROM socialMedia where project_id=%s",[project_id])
            rec_id = namedtuplefetchall(curr)
        manager_id = rec_id[0].manager_id
        customer_id = rec_id[0].customer_id
        print("SENDING")

        with connection.cursor() as curr:
            curr.execute("select contact from customer where customer_id = %s",[customer_id])
            email = namedtuplefetchall(curr)
        customer_email = email[0].contact

        # Rename the email field with customer_email to send to customers when we have actual data

        pwd = settings.EMAIL_HOST_PASSWORD
        if encrypto.verify(obtained,pwd) == True:
            #print("asjdhasd")
            send_mail(head,subhead+'\n'+content,'Gauri Baraskar','[email protected]',settings.EMAIL_HOST_USER,obtained)
        else:
            messages.warning(request,"Wrong Password Entered")
        return JsonResponse(1,safe=False)

    else:
        with connection.cursor() as curr:
            curr.execute("select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id",[request.user.id])
            res = namedtuplefetchall(curr)
        return render(request, 'social/index.html', {'social': res})
normal
{ "blob_id": "c2839046592469dfae7526f72be947126960ba19", "index": 621, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == 'POST':\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\n 'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'\n , [project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print('SENDING')\n with connection.cursor() as curr:\n curr.execute('select contact from customer where customer_id = %s',\n [customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained, pwd) == True:\n send_mail(head, subhead + '\\n' + content, 'Gauri Baraskar',\n '[email protected]', settings.EMAIL_HOST_USER,\n obtained)\n else:\n messages.warning(request, 'Wrong Password Entered')\n return JsonResponse(1, safe=False)\n else:\n with connection.cursor() as curr:\n curr.execute(\n 'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'\n , [request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n", "step-3": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom projects.models import Project\nfrom django.db import connection\nfrom .utils import namedtuplefetchall\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .utils import send_mail\nfrom DBMS import settings\nfrom passlib.hash import pbkdf2_sha256 as encrypto\n\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == 'POST':\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\n 'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'\n , [project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print('SENDING')\n with connection.cursor() as curr:\n curr.execute('select contact from customer where customer_id = %s',\n [customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained, pwd) == True:\n send_mail(head, subhead + '\\n' + content, 'Gauri Baraskar',\n '[email protected]', settings.EMAIL_HOST_USER,\n obtained)\n else:\n messages.warning(request, 'Wrong Password Entered')\n return JsonResponse(1, safe=False)\n else:\n with connection.cursor() as curr:\n curr.execute(\n 'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'\n , [request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n", "step-4": "from django.shortcuts import render\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\nfrom projects.models import Project\nfrom django.db import connection\nfrom .utils import namedtuplefetchall\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .utils import send_mail\n\nfrom DBMS import settings\n\nfrom passlib.hash import pbkdf2_sha256 as encrypto\n\n# Create your views here.\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == \"POST\":\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\"SELECT manager_id,customer_id FROM socialMedia where project_id=%s\",[project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print(\"SENDING\")\n\n with connection.cursor() as curr:\n curr.execute(\"select contact from customer where customer_id = %s\",[customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n\n # Rename the email field with customer_email to send to customers when we have actual data\n\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained,pwd) == True:\n #print(\"asjdhasd\")\n send_mail(head,subhead+'\\n'+content,'Gauri Baraskar','[email protected]',settings.EMAIL_HOST_USER,obtained)\n else:\n messages.warning(request,\"Wrong Password Entered\")\n return JsonResponse(1,safe=False)\n\n else:\n with connection.cursor() as curr:\n curr.execute(\"select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id\",[request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n\n\n\n\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from typing import List, Tuple
import pytest


def fit_transform(*args: str) -> List[Tuple[str, List[int]]]:
    if len(args) == 0:
        raise TypeError('expected at least 1 arguments, got 0')

    categories = args if isinstance(args[0], str) else list(args[0])
    uniq_categories = set(categories)
    bin_format = f'{{0:0{len(uniq_categories)}b}}'

    seen_categories = dict()
    transformed_rows = []

    for cat in categories:
        bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))
        seen_categories.setdefault(cat, list(bin_view_cat))
        transformed_rows.append((cat, seen_categories[cat]))

    return transformed_rows


def test_str_fit_transformr():
    assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [
        ('Moscow', [0, 0, 1]),
        ('New York', [0, 1, 0]),
        ('Moscow', [0, 0, 1]),
        ('London', [1, 0, 0]),
    ]


def test_int_fit_str_transformr():
    assert fit_transform([1, 2, 1, 3]) == [
        (1, [0, 0, 1]),
        (2, [0, 1, 0]),
        (1, [0, 0, 1]),
        (3, [1, 0, 0]),
    ]


# чтобы проверить, что код вызывает исключение, нужно использовать менеджер контекста pytest.raises
def test_error_type_fit_transformr():
    with pytest.raises(TypeError):
        fit_transform(1)


@pytest.fixture()
def randomize():
    from random import randint
    return [randint(0, 9) for _ in range(randint(0, 10))]


def test_intv2_fit_transformr(randomize):
    print(randomize)
    result = fit_transform(randomize)
    assert (len(result) == len(randomize))
normal
{ "blob_id": "b236abaa5e206a8244083ee7f9dcdb16741cb99d", "index": 3072, "step-1": "<mask token>\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\n<mask token>\n\n\[email protected]()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\[email protected]()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef fit_transform(*args: str) ->List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n seen_categories = dict()\n transformed_rows = []\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(\n seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n return transformed_rows\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\[email protected]()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\ndef test_intv2_fit_transformr(randomize):\n print(randomize)\n result = fit_transform(randomize)\n assert len(result) == len(randomize)\n", "step-4": "from typing import List, Tuple\nimport pytest\n\n\ndef fit_transform(*args: str) ->List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n seen_categories = dict()\n transformed_rows = []\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(\n seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n return transformed_rows\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 
0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\[email protected]()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\ndef test_intv2_fit_transformr(randomize):\n print(randomize)\n result = fit_transform(randomize)\n assert len(result) == len(randomize)\n", "step-5": "from typing import List, Tuple\r\nimport pytest\r\n\r\n\r\ndef fit_transform(*args: str) -> List[Tuple[str, List[int]]]:\r\n if len(args) == 0:\r\n raise TypeError('expected at least 1 arguments, got 0')\r\n\r\n categories = args if isinstance(args[0], str) else list(args[0])\r\n uniq_categories = set(categories)\r\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\r\n\r\n seen_categories = dict()\r\n transformed_rows = []\r\n\r\n for cat in categories:\r\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))\r\n seen_categories.setdefault(cat, list(bin_view_cat))\r\n transformed_rows.append((cat, seen_categories[cat]))\r\n\r\n return transformed_rows\r\n\r\n\r\ndef test_str_fit_transformr():\r\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [\r\n ('Moscow', [0, 0, 1]),\r\n ('New York', [0, 1, 0]),\r\n ('Moscow', [0, 0, 1]),\r\n ('London', [1, 0, 0]),\r\n ]\r\n\r\n\r\ndef test_int_fit_str_transformr():\r\n assert fit_transform([1, 2, 1, 3]) == [\r\n (1, [0, 0, 1]),\r\n (2, [0, 1, 0]),\r\n (1, [0, 0, 1]),\r\n (3, [1, 0, 0]),\r\n ]\r\n\r\n\r\n# чтобы проверить, что код вызывает исключение, нужно использовать менеджер контекста pytest.raises\r\ndef test_error_type_fit_transformr():\r\n with pytest.raises(TypeError):\r\n fit_transform(1)\r\n\r\n\r\[email protected]()\r\ndef randomize():\r\n from random import randint\r\n return [randint(0, 9) for _ in range(randint(0, 10))]\r\n\r\n\r\ndef test_intv2_fit_transformr(randomize):\r\n print(randomize)\r\n result = fit_transform(randomize)\r\n assert (len(result) == len(randomize))\r\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
[ 3, 4, 6, 7, 8 ]
# We don't need no stinking models but django likes this file to be there if you are an app
normal
{ "blob_id": "a1304f290e0346e7aa2e22d9c2d3e7f735b1e8e7", "index": 96, "step-1": "\n# We don't need no stinking models but django likes this file to be there if you are an app\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 1 ] }
[ 1 ]
from .mail_utils import send_mail
from .request_utils import get_host_url
normal
{ "blob_id": "74b0ccb5193380ce596313d1ac3f898ff1fdd2f3", "index": 930, "step-1": "<mask token>\n", "step-2": "from .mail_utils import send_mail\nfrom .request_utils import get_host_url\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import numpy as np
import initialization as init
import evaluation as eval
import selection as sel
import recombination as rec
import mutation as mut

initialize = init.permutation
evaluate = eval.custom
select = sel.rank_based
mutate = mut.swap
reproduce = rec.pairwise
crossover = rec.order
replace = sel.rank_based

params = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size': 150, 'len_gene': 100, 'mut_rate': 0.5}

population = initialize(params)
population = evaluate(params, population)

for gen in range(params['gens']):
    parents = select(population, params['n_pars'])
    offspring = reproduce(params, parents, crossover)
    offspring = mutate(params, offspring)
    offspring = evaluate(params, offspring)
    population = replace(np.concatenate((population, offspring), axis=0), params['pop_size'])
    print(gen)
normal
{ "blob_id": "5eab41a2ef536365bab6f6b5ad97efb8d26d7687", "index": 4456, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n", "step-3": "<mask token>\ninitialize = init.permutation\nevaluate = eval.custom\nselect = sel.rank_based\nmutate = mut.swap\nreproduce = rec.pairwise\ncrossover = rec.order\nreplace = sel.rank_based\nparams = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size':\n 150, 'len_gene': 100, 'mut_rate': 0.5}\npopulation = initialize(params)\npopulation = evaluate(params, population)\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n", "step-4": "import numpy as np\nimport initialization as init\nimport evaluation as eval\nimport selection as sel\nimport recombination as rec\nimport mutation as mut\ninitialize = init.permutation\nevaluate = eval.custom\nselect = sel.rank_based\nmutate = mut.swap\nreproduce = rec.pairwise\ncrossover = rec.order\nreplace = sel.rank_based\nparams = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size':\n 150, 'len_gene': 100, 'mut_rate': 0.5}\npopulation = initialize(params)\npopulation = evaluate(params, population)\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import logging
from django.contrib.auth.models import User
import json
from django.http import HttpResponse
from enumfields.fields import EnumFieldMixin

from Api.models import Status

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def check_cookie(request):
    # Post.objects.all().delete()
    result = {
        "status": True
    }
    try:
        user_id = request.GET.get('user_id')
        user = User.objects.get(pk=user_id)
        cookie_status = user.profile.cookie_status

        if cookie_status is Status.DEACTIVATE:
            result['cookie_status'] = "0"
        elif cookie_status is Status.ACTIVATE:
            result['cookie_status'] = "1"
        elif cookie_status is Status.EMPTY:
            result['cookie_status'] = "2"
        elif cookie_status is Status.WARNING:
            result['cookie_status'] = "3"
        elif cookie_status is Status.ERROR:
            result['cookie_status'] = "4"
    except Exception as e:
        logger.info(e)
        result["status"] = False
    return HttpResponse(json.dumps(result), content_type="application/json")
normal
{ "blob_id": "2bc3b0df720788e43da3d9c28adb22b3b1be8c58", "index": 5002, "step-1": "<mask token>\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-2": "<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-3": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-4": "import logging\nfrom django.contrib.auth.models import User\nimport json\nfrom django.http import HttpResponse\nfrom enumfields.fields import EnumFieldMixin\nfrom Api.models import Status\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-5": "import logging\nfrom django.contrib.auth.models import User\nimport json\nfrom django.http import HttpResponse\nfrom enumfields.fields import EnumFieldMixin\n\nfrom Api.models import Status\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef 
check_cookie(request):\n # Post.objects.all().delete()\n result = {\n \"status\": True\n }\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = \"0\"\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = \"1\"\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = \"2\"\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = \"3\"\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = \"4\"\n except Exception as e:\n logger.info(e)\n result[\"status\"] = False\n return HttpResponse(json.dumps(result), content_type=\"application/json\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env python
import cgitb
import cgi
import pymysql

form = cgi.FieldStorage()
c.execute("SELECT * FROM example")
recs = c.fetchall()
records1 = """
<body>
    <table>
        <tbody>
            <tr>
                <th>Full Name</th>
                <th>Average Score</th>
            </tr>"""
records_dyn = [
    f"<tr><td>{name}</td><td>{avg}</td></tr>" for recs[1], recs[2] in recs]
records2 = """
<form method="POST" action="index.py">
<input type="submit" value="Go Back">
</form>
        </body>
    </table>
</body>
</html>"""
print("Content-Type:text/html; charset=utf-8")
print()
for i in records1.split("\n"):
    print(i)
for i in records_dyn:
    print(i)
for i in records1.split("\n"):
    print(i)
normal
{ "blob_id": "b5fee01582a28085983c56b9c266ef7fd5c3c927", "index": 5132, "step-1": "<mask token>\n", "step-2": "<mask token>\nc.execute('SELECT * FROM example')\n<mask token>\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n", "step-3": "<mask token>\nform = cgi.FieldStorage()\nc.execute('SELECT * FROM example')\nrecs = c.fetchall()\nrecords1 = \"\"\"\n<body>\n\t<table>\n\t\t<tbody>\n\t\t\t<tr>\n\t\t\t\t<th>Full Name</th>\n\t\t\t\t<th>Average Score</th>\n\t\t\t</tr>\"\"\"\nrecords_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[\n 2] in recs]\nrecords2 = \"\"\"\n<form method=\"POST\" action=\"index.py\">\n<input type=\"submit\" value=\"Go Back\">\n</form>\n\t\t</body>\n\t</table>\n</body>\n</html>\"\"\"\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n", "step-4": "import cgitb\nimport cgi\nimport pymysql\nform = cgi.FieldStorage()\nc.execute('SELECT * FROM example')\nrecs = c.fetchall()\nrecords1 = \"\"\"\n<body>\n\t<table>\n\t\t<tbody>\n\t\t\t<tr>\n\t\t\t\t<th>Full Name</th>\n\t\t\t\t<th>Average Score</th>\n\t\t\t</tr>\"\"\"\nrecords_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[\n 2] in recs]\nrecords2 = \"\"\"\n<form method=\"POST\" action=\"index.py\">\n<input type=\"submit\" value=\"Go Back\">\n</form>\n\t\t</body>\n\t</table>\n</body>\n</html>\"\"\"\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n", "step-5": "#!/usr/bin/env python\r\nimport cgitb\r\nimport cgi\r\nimport pymysql\r\n\r\nform = cgi.FieldStorage()\r\nc.execute(\"SELECT * FROM example\")\r\nrecs = c.fetchall()\r\nrecords1 = \"\"\"\r\n<body>\r\n\t<table>\r\n\t\t<tbody>\r\n\t\t\t<tr>\r\n\t\t\t\t<th>Full Name</th>\r\n\t\t\t\t<th>Average Score</th>\r\n\t\t\t</tr>\"\"\"\r\nrecords_dyn = [\r\n f\"<tr><td>{name}</td><td>{avg}</td></tr>\" for recs[1], recs[2] in recs]\r\nrecords2 = \"\"\"\r\n<form method=\"POST\" action=\"index.py\">\r\n<input type=\"submit\" value=\"Go Back\">\r\n</form>\r\n\t\t</body>\r\n\t</table>\r\n</body>\r\n</html>\"\"\"\r\nprint(\"Content-Type:text/html; charset=utf-8\")\r\nprint()\r\nfor i in records1.split(\"\\n\"):\r\n print(i)\r\nfor i in records_dyn:\r\n print(i)\r\nfor i in records1.split(\"\\n\"):\r\n print(i)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
'''import pyttsx3

#engine = pyttsx3.init()

#Conficuração das vozes
#voices = engine.getProperty('voices')
#engine.setProperty('voice', voices[2].id)

engine=pyttsx3.init()

voices=engine.getProperty('voices')
engine.setProperty('voice',voices[3].id)

#Falar texto
engine.say('Olá meu nome é Jarvis. Sou uma inteligência artificial')
engine.runAndWait()
#print(voices)

#Printa na tela todas as vozes disponíveis'''

'''for voice in voices:
    print("Voice: %s" % voice.name)
    print(" - ID: %s" % voice.id)
    print(" - Languages: %s" % voice.languages)
    print(" - Gender: %s" % voice.gender)
    print(" - Age: %s" % voice.age)
    print("\n")'''
normal
{ "blob_id": "d9bf58dc76d4e8d7146fac3bb2bdfb538ebf78a5", "index": 7102, "step-1": "<mask token>\n", "step-2": "'''import pyttsx3\n\n#engine = pyttsx3.init()\n\n#Conficuração das vozes\n#voices = engine.getProperty('voices')\n#engine.setProperty('voice', voices[2].id)\n\nengine=pyttsx3.init()\n\nvoices=engine.getProperty('voices')\nengine.setProperty('voice',voices[3].id)\n\n#Falar texto\nengine.say('Olá meu nome é Jarvis. Sou uma inteligência artificial')\nengine.runAndWait()\n#print(voices)\n\n#Printa na tela todas as vozes disponíveis'''\n'''for voice in voices:\n print(\"Voice: %s\" % voice.name)\n print(\" - ID: %s\" % voice.id)\n print(\" - Languages: %s\" % voice.languages)\n print(\" - Gender: %s\" % voice.gender)\n print(\" - Age: %s\" % voice.age)\n print(\"\\n\")'''\n\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# -*- coding:Utf-8 -*-

from .game_action_manager import GameActionManager
from .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager
normal
{ "blob_id": "48294209d51fbe4dfb2a5130311a10c8a1dd027c", "index": 9237, "step-1": "<mask token>\n", "step-2": "from .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n", "step-3": "# -*- coding:Utf-8 -*-\n\n\nfrom .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# C8-06 p.146 Write city_country() function that takes name city and country
# Print city name then the country the city is in. call 3 times with differet pairs.

def city_country(city, country):
    """Name a city and the country it resides in seperated by a comma."""
    print(f'"{city.title()}, {country.title()}"\n')


city_country("St. John's", 'Canada')

city_country("ottawa", "Ontario")

city_country('cairo', 'egypt')
normal
{ "blob_id": "2866ecf69969b445fb15740a507ddecb1dd1762d", "index": 3395, "step-1": "<mask token>\n", "step-2": "def city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\n<mask token>\n", "step-3": "def city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\ncity_country(\"St. John's\", 'Canada')\ncity_country('ottawa', 'Ontario')\ncity_country('cairo', 'egypt')\n", "step-4": "# C8-06 p.146 Write city_country() function that takes name city and country\n# Print city name then the country the city is in. call 3 times with differet pairs.\n\ndef city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\ncity_country(\"St. John's\", 'Canada')\n\ncity_country(\"ottawa\", \"Ontario\")\n\ncity_country('cairo', 'egypt')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/oasis/scratch/csd181/mdburns/python/bin/python
import sys
import pickle
import base64
from process import process
import multiprocessing as mp

EPOCH_LENGTH=.875
EPOCH_OFFSET=.125
NUM_FOLDS=5

if __name__ == "__main__":
    mp.freeze_support()

p= mp.Pool(2)

for instr in sys.stdin:
    this_key=''
    sys.stderr.write('mapper: begin receiving data\n')
    instr = instr.strip()
    keystr, valstr = instr.split('\t', 1)
    sys.stderr.write('mapper: key_string ' + keystr + '\n')

    this_key, this_id = keystr.split('.', 1)
    sys.stderr.write('mapper: key is ' + keystr +'\n')
    sys.stderr.write('mapper: this_key is ' + this_key +'\n')
    sys.stderr.write('mapper: this_id is ' + this_id +'\n')

    v = pickle.loads(base64.decodestring(valstr))
    y = v[0].reshape((-1,1))
    eeg = pickle.loads(v[1])

    try:
        rov = process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p)
        result = {'id':this_id, 'rov':rov }

    except:
        sys.stderr.write('mapper: process failed\n')
        continue

    this_val = base64.b64encode(pickle.dumps(result, protocol=2))

    if this_key != '':
        print '%s\t%s' % (this_key, this_val)

p.close()
sys.stderr.write('mapper: good job\n')
normal
{ "blob_id": "e477a59e86cfeb3f26db1442a05d0052a45c42ff", "index": 6397, "step-1": "#!/oasis/scratch/csd181/mdburns/python/bin/python\nimport sys\nimport pickle\nimport base64\nfrom process import process\nimport multiprocessing as mp\n\nEPOCH_LENGTH=.875\nEPOCH_OFFSET=.125\nNUM_FOLDS=5\n\nif __name__ == \"__main__\":\n mp.freeze_support()\n\np= mp.Pool(2)\n\nfor instr in sys.stdin:\n this_key=''\n sys.stderr.write('mapper: begin receiving data\\n')\n instr = instr.strip()\n keystr, valstr = instr.split('\\t', 1)\n sys.stderr.write('mapper: key_string ' + keystr + '\\n')\n\n this_key, this_id = keystr.split('.', 1)\n sys.stderr.write('mapper: key is ' + keystr +'\\n')\n sys.stderr.write('mapper: this_key is ' + this_key +'\\n')\n sys.stderr.write('mapper: this_id is ' + this_id +'\\n')\n\n v = pickle.loads(base64.decodestring(valstr))\n y = v[0].reshape((-1,1))\n eeg = pickle.loads(v[1])\n\n try:\n rov = process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p)\n result = {'id':this_id, 'rov':rov }\n\n except:\n sys.stderr.write('mapper: process failed\\n')\n continue\n\n this_val = base64.b64encode(pickle.dumps(result, protocol=2))\n\n if this_key != '':\n print '%s\\t%s' % (this_key, this_val)\n\np.close()\nsys.stderr.write('mapper: good job\\n')", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np
import skimage


def preprocess_img(img, size):
    img = np.rollaxis(img, 0, 3)  # It becomes (640, 480, 3)
    img = skimage.transform.resize(img, size)
    img = skimage.color.rgb2gray(img)

    return img

# data = minerl.data.make("MineRLNavigateDense-v0", data_dir="../dataset/navigate")
#
# # Iterate through a single epoch gathering sequences of at most 32 steps
# for current_state, action, reward, next_state, done in data.sarsd_iter(num_epochs=1, max_sequence_len=32):
#     # Print the POV @ the first step of the sequence
#     print(current_state['pov'][0])
#
#     # Print the final reward pf the sequence!
#     print(reward[-1])
#
#     # Check if final (next_state) is terminal.
#     print(done[-1])
#
#     # ... do something with the data.
#     print("At the end of trajectories the length can be < max_sequence_len", len(reward))
normal
{ "blob_id": "9706b9ba81f41b131c364a16bb17a0c1e31e3a04", "index": 6608, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n return img\n", "step-3": "import numpy as np\nimport skimage\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n return img\n", "step-4": "import numpy as np\nimport skimage\n\n\ndef preprocess_img(img, size):\n img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)\n img = skimage.transform.resize(img, size)\n img = skimage.color.rgb2gray(img)\n\n return img\n\n# data = minerl.data.make(\"MineRLNavigateDense-v0\", data_dir=\"../dataset/navigate\")\n#\n# # Iterate through a single epoch gathering sequences of at most 32 steps\n# for current_state, action, reward, next_state, done in data.sarsd_iter(num_epochs=1, max_sequence_len=32):\n# # Print the POV @ the first step of the sequence\n# print(current_state['pov'][0])\n#\n# # Print the final reward pf the sequence!\n# print(reward[-1])\n#\n# # Check if final (next_state) is terminal.\n# print(done[-1])\n#\n# # ... do something with the data.\n# print(\"At the end of trajectories the length can be < max_sequence_len\", len(reward))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# tail -2 hightemp.txt


import sys

with open(sys.argv[1]) as f:
    lines = f.readlines();

n = sys.argv[2];

print "".join(lines[len(lines)-int(n):])
normal
{ "blob_id": "a1710ee228a432db92c9586ddff0bfcad1f434a8", "index": 2088, "step-1": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# tail -2 hightemp.txt\n\n\nimport sys\n\nwith open(sys.argv[1]) as f:\n lines = f.readlines();\n\nn = sys.argv[2];\n\nprint \"\".join(lines[len(lines)-int(n):])", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from datetime import datetime
import time
from os import system
import RPi.GPIO as GPIO
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(26, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(19, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(13, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(6, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(5, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(11, GPIO.OUT)  # este pin es de salida carro
GPIO.setup(20, GPIO.IN)  #Este pin es una entrada carro pequeno
GPIO.setup(16, GPIO.IN)  #Este pin es una entrada carro grande

PATH_CRED = '/home/pi/Desktop/cred.json'
URL_DB = 'https://arquiii-default-rtdb.firebaseio.com/'
cred = credentials.Certificate(PATH_CRED)
firebase_admin.initialize_app(cred, {
    'databaseURL': URL_DB
})
REF = db.reference("/")

REF.set({
    'Proceso':
    {
    }
})

REF = db.reference("/Vehiculos")

while True:
    tiempo = datetime.now()
    #Si hay un 1 en el pin 20
    if GPIO.input(20):
        tiempoE = 5  #tiempo que va a cambiar por estacion
        if GPIO.input(20):
            tamano = "Pequeno"
        elif GPIO.input(20) and GPIO.input(16):
            tamano = "Grande"
        else:
            tamano = "Mediano"

        print("Se ha detectado un automovil de tamano",tamano)
        REF.push({
            "Recepcion": str(tiempo),
            "Tamano": tamano,
        })
        if (tiempo == 5):
            print("Activacion de agua... ")
            tiempo += 5
            GPIO.output(26, True)
            print("Desactivacion de agua...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo agua": str(tiempo),
            })
            GPIO.output(26, False)
        elif (tiempo == 10):
            print("Activacion de rocio de shampoo... ")
            tiempo += 5
            GPIO.output(19, True)
            print("Desactivacion de rocio de shampoo...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo rocio": str(tiempo),
            })
            GPIO.output(19, False)
        elif (tiempo == 15):
            print("Activacion de rodillos de limpieza... ")
            tiempo += 5
            GPIO.output(13, True)
            print("Desactivacion de rodillos de limpieza...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo rodillo": str(tiempo),
            })
            GPIO.output(13, False)
        elif (tiempo == 20):
            print("Activacion de escobas de limpieza ")
            tiempo += 5
            GPIO.output(6, True)
            print("Desactivacion de escobas de limpieza...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo escoba": str(tiempo),
            })
            GPIO.output(6, False)
        elif (tiempo == 25):
            print("Activacion de rocio de agua 2nda vez ")
            tiempo += 5
            GPIO.output(5, True)
            print("Desactivacion de rocio de agua 2nda vez...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo agua 2nda": str(tiempo),
            })
            GPIO.output(5, False)
        elif (tiempo == 30):
            print("Activacion de rodillos de secado")
            tiempo += 5
            GPIO.output(11, True)
            print("Desactivacion de rodillos de secado...")
            tiempo = datetime.now()
            REF.push({
                "Tiempo rodillos": str(tiempo),
            })
            GPIO.output(11, False)

GPIO.cleanup()
normal
{ "blob_id": "0972bd1241ad91f54f8dfde6327ee226c27bf2ca", "index": 9747, "step-1": "<mask token>\n", "step-2": "<mask token>\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.OUT)\nGPIO.setup(26, GPIO.OUT)\nGPIO.setup(19, GPIO.OUT)\nGPIO.setup(13, GPIO.OUT)\nGPIO.setup(6, GPIO.OUT)\nGPIO.setup(5, GPIO.OUT)\nGPIO.setup(11, GPIO.OUT)\nGPIO.setup(20, GPIO.IN)\nGPIO.setup(16, GPIO.IN)\n<mask token>\nfirebase_admin.initialize_app(cred, {'databaseURL': URL_DB})\n<mask token>\nREF.set({'Proceso': {}})\n<mask token>\nwhile True:\n tiempo = datetime.now()\n if GPIO.input(20):\n tiempoE = 5\n if GPIO.input(20):\n tamano = 'Pequeno'\n elif GPIO.input(20) and GPIO.input(16):\n tamano = 'Grande'\n else:\n tamano = 'Mediano'\n print('Se ha detectado un automovil de tamano', tamano)\n REF.push({'Recepcion': str(tiempo), 'Tamano': tamano})\n if tiempo == 5:\n print('Activacion de agua... ')\n tiempo += 5\n GPIO.output(26, True)\n print('Desactivacion de agua...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua': str(tiempo)})\n GPIO.output(26, False)\n elif tiempo == 10:\n print('Activacion de rocio de shampoo... ')\n tiempo += 5\n GPIO.output(19, True)\n print('Desactivacion de rocio de shampoo...')\n tiempo = datetime.now()\n REF.push({'Tiempo rocio': str(tiempo)})\n GPIO.output(19, False)\n elif tiempo == 15:\n print('Activacion de rodillos de limpieza... ')\n tiempo += 5\n GPIO.output(13, True)\n print('Desactivacion de rodillos de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillo': str(tiempo)})\n GPIO.output(13, False)\n elif tiempo == 20:\n print('Activacion de escobas de limpieza ')\n tiempo += 5\n GPIO.output(6, True)\n print('Desactivacion de escobas de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo escoba': str(tiempo)})\n GPIO.output(6, False)\n elif tiempo == 25:\n print('Activacion de rocio de agua 2nda vez ')\n tiempo += 5\n GPIO.output(5, True)\n print('Desactivacion de rocio de agua 2nda vez...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua 2nda': str(tiempo)})\n GPIO.output(5, False)\n elif tiempo == 30:\n print('Activacion de rodillos de secado')\n tiempo += 5\n GPIO.output(11, True)\n print('Desactivacion de rodillos de secado...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillos': str(tiempo)})\n GPIO.output(11, False)\nGPIO.cleanup()\n", "step-3": "<mask token>\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.OUT)\nGPIO.setup(26, GPIO.OUT)\nGPIO.setup(19, GPIO.OUT)\nGPIO.setup(13, GPIO.OUT)\nGPIO.setup(6, GPIO.OUT)\nGPIO.setup(5, GPIO.OUT)\nGPIO.setup(11, GPIO.OUT)\nGPIO.setup(20, GPIO.IN)\nGPIO.setup(16, GPIO.IN)\nPATH_CRED = '/home/pi/Desktop/cred.json'\nURL_DB = 'https://arquiii-default-rtdb.firebaseio.com/'\ncred = credentials.Certificate(PATH_CRED)\nfirebase_admin.initialize_app(cred, {'databaseURL': URL_DB})\nREF = db.reference('/')\nREF.set({'Proceso': {}})\nREF = db.reference('/Vehiculos')\nwhile True:\n tiempo = datetime.now()\n if GPIO.input(20):\n tiempoE = 5\n if GPIO.input(20):\n tamano = 'Pequeno'\n elif GPIO.input(20) and GPIO.input(16):\n tamano = 'Grande'\n else:\n tamano = 'Mediano'\n print('Se ha detectado un automovil de tamano', tamano)\n REF.push({'Recepcion': str(tiempo), 'Tamano': tamano})\n if tiempo == 5:\n print('Activacion de agua... ')\n tiempo += 5\n GPIO.output(26, True)\n print('Desactivacion de agua...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua': str(tiempo)})\n GPIO.output(26, False)\n elif tiempo == 10:\n print('Activacion de rocio de shampoo... 
')\n tiempo += 5\n GPIO.output(19, True)\n print('Desactivacion de rocio de shampoo...')\n tiempo = datetime.now()\n REF.push({'Tiempo rocio': str(tiempo)})\n GPIO.output(19, False)\n elif tiempo == 15:\n print('Activacion de rodillos de limpieza... ')\n tiempo += 5\n GPIO.output(13, True)\n print('Desactivacion de rodillos de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillo': str(tiempo)})\n GPIO.output(13, False)\n elif tiempo == 20:\n print('Activacion de escobas de limpieza ')\n tiempo += 5\n GPIO.output(6, True)\n print('Desactivacion de escobas de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo escoba': str(tiempo)})\n GPIO.output(6, False)\n elif tiempo == 25:\n print('Activacion de rocio de agua 2nda vez ')\n tiempo += 5\n GPIO.output(5, True)\n print('Desactivacion de rocio de agua 2nda vez...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua 2nda': str(tiempo)})\n GPIO.output(5, False)\n elif tiempo == 30:\n print('Activacion de rodillos de secado')\n tiempo += 5\n GPIO.output(11, True)\n print('Desactivacion de rodillos de secado...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillos': str(tiempo)})\n GPIO.output(11, False)\nGPIO.cleanup()\n", "step-4": "from datetime import datetime\nimport time\nfrom os import system\nimport RPi.GPIO as GPIO\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.OUT)\nGPIO.setup(26, GPIO.OUT)\nGPIO.setup(19, GPIO.OUT)\nGPIO.setup(13, GPIO.OUT)\nGPIO.setup(6, GPIO.OUT)\nGPIO.setup(5, GPIO.OUT)\nGPIO.setup(11, GPIO.OUT)\nGPIO.setup(20, GPIO.IN)\nGPIO.setup(16, GPIO.IN)\nPATH_CRED = '/home/pi/Desktop/cred.json'\nURL_DB = 'https://arquiii-default-rtdb.firebaseio.com/'\ncred = credentials.Certificate(PATH_CRED)\nfirebase_admin.initialize_app(cred, {'databaseURL': URL_DB})\nREF = db.reference('/')\nREF.set({'Proceso': {}})\nREF = db.reference('/Vehiculos')\nwhile True:\n tiempo = datetime.now()\n if GPIO.input(20):\n tiempoE = 5\n if GPIO.input(20):\n tamano = 'Pequeno'\n elif GPIO.input(20) and GPIO.input(16):\n tamano = 'Grande'\n else:\n tamano = 'Mediano'\n print('Se ha detectado un automovil de tamano', tamano)\n REF.push({'Recepcion': str(tiempo), 'Tamano': tamano})\n if tiempo == 5:\n print('Activacion de agua... ')\n tiempo += 5\n GPIO.output(26, True)\n print('Desactivacion de agua...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua': str(tiempo)})\n GPIO.output(26, False)\n elif tiempo == 10:\n print('Activacion de rocio de shampoo... ')\n tiempo += 5\n GPIO.output(19, True)\n print('Desactivacion de rocio de shampoo...')\n tiempo = datetime.now()\n REF.push({'Tiempo rocio': str(tiempo)})\n GPIO.output(19, False)\n elif tiempo == 15:\n print('Activacion de rodillos de limpieza... 
')\n tiempo += 5\n GPIO.output(13, True)\n print('Desactivacion de rodillos de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillo': str(tiempo)})\n GPIO.output(13, False)\n elif tiempo == 20:\n print('Activacion de escobas de limpieza ')\n tiempo += 5\n GPIO.output(6, True)\n print('Desactivacion de escobas de limpieza...')\n tiempo = datetime.now()\n REF.push({'Tiempo escoba': str(tiempo)})\n GPIO.output(6, False)\n elif tiempo == 25:\n print('Activacion de rocio de agua 2nda vez ')\n tiempo += 5\n GPIO.output(5, True)\n print('Desactivacion de rocio de agua 2nda vez...')\n tiempo = datetime.now()\n REF.push({'Tiempo agua 2nda': str(tiempo)})\n GPIO.output(5, False)\n elif tiempo == 30:\n print('Activacion de rodillos de secado')\n tiempo += 5\n GPIO.output(11, True)\n print('Desactivacion de rodillos de secado...')\n tiempo = datetime.now()\n REF.push({'Tiempo rodillos': str(tiempo)})\n GPIO.output(11, False)\nGPIO.cleanup()\n", "step-5": "from datetime import datetime\nimport time\nfrom os import system\nimport RPi.GPIO as GPIO\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM) \nGPIO.setup(21, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(26, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(19, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(13, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(6, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(5, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(11, GPIO.OUT) # este pin es de salida carro\nGPIO.setup(20, GPIO.IN) #Este pin es una entrada carro pequeno\nGPIO.setup(16, GPIO.IN) #Este pin es una entrada carro grande\n\n\nPATH_CRED = '/home/pi/Desktop/cred.json'\nURL_DB = 'https://arquiii-default-rtdb.firebaseio.com/'\ncred = credentials.Certificate(PATH_CRED)\nfirebase_admin.initialize_app(cred, {\n 'databaseURL': URL_DB\n})\nREF = db.reference(\"/\")\n\nREF.set({\n 'Proceso': \n {\n }\n})\n\nREF = db.reference(\"/Vehiculos\")\n\nwhile True:\n tiempo = datetime.now()\n #Si hay un 1 en el pin 20\n if GPIO.input(20):\n tiempoE = 5 #tiempo que va a cambiar por estacion\n if GPIO.input(20):\n tamano = \"Pequeno\"\n elif GPIO.input(20) and GPIO.input(16):\n tamano = \"Grande\"\n else:\n tamano = \"Mediano\"\n \n print(\"Se ha detectado un automovil de tamano\",tamano)\n REF.push({ \n \"Recepcion\": str(tiempo),\n \"Tamano\": tamano, \n })\n if (tiempo == 5):\n print(\"Activacion de agua... \")\n tiempo += 5\n GPIO.output(26, True)\n print(\"Desactivacion de agua...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo agua\": str(tiempo), \n })\n GPIO.output(26, False)\n elif (tiempo == 10):\n print(\"Activacion de rocio de shampoo... \")\n tiempo += 5\n GPIO.output(19, True)\n print(\"Desactivacion de rocio de shampoo...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo rocio\": str(tiempo), \n })\n GPIO.output(19, False)\n elif (tiempo == 15):\n print(\"Activacion de rodillos de limpieza... 
\")\n tiempo += 5\n GPIO.output(13, True)\n print(\"Desactivacion de rodillos de limpieza...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo rodillo\": str(tiempo), \n })\n GPIO.output(13, False)\n elif (tiempo == 20):\n print(\"Activacion de escobas de limpieza \")\n tiempo += 5\n GPIO.output(6, True)\n print(\"Desactivacion de escobas de limpieza...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo escoba\": str(tiempo), \n })\n GPIO.output(6, False)\n elif (tiempo == 25):\n print(\"Activacion de rocio de agua 2nda vez \")\n tiempo += 5\n GPIO.output(5, True)\n print(\"Desactivacion de rocio de agua 2nda vez...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo agua 2nda\": str(tiempo), \n })\n GPIO.output(5, False)\n elif (tiempo == 30):\n print(\"Activacion de rodillos de secado\")\n tiempo += 5\n GPIO.output(11, True)\n print(\"Desactivacion de rodillos de secado...\")\n tiempo = datetime.now()\n REF.push({ \n \"Tiempo rodillos\": str(tiempo), \n })\n GPIO.output(11, False)\n \nGPIO.cleanup()\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np


def preprocess_transformers(y_train, transf):
    if transf != 'ln':
        if transf == 'minmax':
            scaler = MinMaxScaler()
            scaler2 = MinMaxScaler()
        elif transf == 'standard':
            scaler = StandardScaler()
            scaler2 = StandardScaler()
        elif transf == 'robust':
            scaler = RobustScaler()
            scaler2 = RobustScaler()
        elif transf == 'boxcox':
            scaler = PowerTransformer(method='yeo-johnson')
            scaler2 = PowerTransformer(method='yeo-johnson')

        mm_scaler2 = scaler2.fit(y_train)
        y_train = mm_scaler2.transform(y_train)
    else:
        # y_train = y_train.values
        y_train = np.log(y_train).values
        mm_scaler2 = ''

    return y_train, mm_scaler2


def transformacion_inversa(y_predict, mm_scaler2):
    if mm_scaler2 != '':
        y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))
    else:
        y_predict = np.exp(y_predict)
        # y_predict = y_predict

    return y_predict


def predict_model(config, model):
    if type(config) == dict:
        df = pd.DataFrame(config, index=[0])
    else:
        df = config

    print(f'df: {df}')
    # prepared_df, scaler = preprocess_transformers(df, 'minmax')

    prepared_df = df
    y_pred = model.predict(prepared_df)

    print(f'y_pred {y_pred}')
    # print(f'scaler {scaler}')

    return y_pred
    # return 1
normal
{ "blob_id": "890d50c741ffd576312c63dc450e274b4517bf12", "index": 9856, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n print(f'df: {df}')\n prepared_df = df\n y_pred = model.predict(prepared_df)\n print(f'y_pred {y_pred}')\n return y_pred\n", "step-4": "from sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n print(f'df: {df}')\n prepared_df = df\n y_pred = model.predict(prepared_df)\n print(f'y_pred {y_pred}')\n return y_pred\n", "step-5": "from sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom 
sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n # y_train = y_train.values\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n # y_predict = y_predict\n\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n\n print(f'df: {df}')\n # prepared_df, scaler = preprocess_transformers(df, 'minmax')\n\n prepared_df = df\n y_pred = model.predict(prepared_df)\n\n print(f'y_pred {y_pred}')\n # print(f'scaler {scaler}')\n\n return y_pred\n # return 1\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
DATABASE_NAME = "user_db"
normal
{ "blob_id": "8c8bbbc682889c8d79c893f27def76ad70e8bf8d", "index": 233, "step-1": "<mask token>\n", "step-2": "DATABASE_NAME = 'user_db'\n", "step-3": "DATABASE_NAME = \"user_db\"", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/env python from __future__ import print_function import weechat import sys import pickle import json import math import os.path from datetime import datetime from datetime import date from datetime import timedelta from dateutil.parser import parse as datetime_parse from os.path import expanduser from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request # TODO: Add settings # minutes_remaining = [5, 10, 15] # notify_enabled = yes/no # time_format = '%H:%M' ??? SCRIPT_NAME = 'weechat-gcal' SCRIPT_AUTHOR = 'Dave Mulford' SCRIPT_VERSION = '0.1' SCRIPT_LICENSE = 'GPL2' SCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.' SCRIPT_SHUTDOWN_FN = '' SCRIPT_CHARSET = '' TIMEOUT_MS = 3000 CALLED_FROM_CMD = '100' CALLED_FROM_TIMER = '200' NOTIFICATION_THRESHOLDS = [5,15] # If modifying these scopes, delete the file token.pickle. SCOPES = ['https://www.googleapis.com/auth/calendar.readonly'] # Where the weechat-gcal-token.pickle file is located CACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal') # ============================= # GOOGLE CALENDAR FUNCTIONS # ============================= def _load_credentials(creds_file=None): """Loads the credentials from a credentials.json file or by prompting for authentication. Returns a credentials object to be used by the Google Sheets API. """ creds = None # Validate the credentials file if not creds_file: creds_file = 'credentials.json' if not os.path.exists(creds_file): creds_file = os.path.join(expanduser('~'), 'credentials.json') if not os.path.exists(creds_file): raise SystemExit('Could not find a credentials.json file. ' \ 'Either pass one as argument or make sure credentials.json exists in ' \ 'the current directory or ' + expanduser('~')) # Creates CACHE_DIR if it does not exist # mode 0x777 (the default) is used because the system's umask value is masked out first if not os.path.exists(CACHE_DIR): os.mkdir(CACHE_DIR) pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle') # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first time. if os.path.exists(pickle_filename): with open(pickle_filename, 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. 
if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(pickle_filename, 'wb') as token: pickle.dump(creds, token) return creds def gc_get_events(num_events=50): creds = _load_credentials() service = build('calendar', 'v3', credentials=creds) # Call the Calendar API now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time tomorrow = datetime.combine( \ date.today() + timedelta(days=2), \ datetime.min.time()) \ .isoformat() + 'Z' #print('Getting the upcoming {} events between {} and {}'.format(num_events, now, tomorrow)) events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy='startTime').execute() events = events_result.get('items', []) return events # ============================= # WEECHAT HELPER FUNCTIONS # ============================= def buffer_get(): """Finds or creates a buffer to use for script output. Returns a buffer pointer. """ buffer = weechat.buffer_search('python', SCRIPT_NAME) if not buffer: buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '') weechat.buffer_set(buffer, 'time_for_each_line', '0') weechat.buffer_set(buffer, 'nicklist', '0') weechat.buffer_set(buffer, 'title', 'Google Calendar') weechat.buffer_set(buffer, 'localvar_set_no_log', '1') return buffer def buffer_input(data, buffer, input_data): """A function called when text, that is not a command, is entered in the weechat-gcal buffer. This function exists to prevent errors from being shown, there is no functionality. """ return weechat.WEECHAT_RC_OK def update_gcal_buffer(buffer, events): weechat.buffer_clear(buffer) if events == []: weechat.prnt(buffer, 'No events for now. YAY!!!') dates = {} for event in events: dt = datetime_parse(event['date']) datestr = dt.strftime('%a %Y-%m-%d') timestr = dt.strftime('%H:%M') if datestr not in dates: dates[datestr] = [] dates[datestr].append({ 'time': timestr, 'summary': event['summary'] }) for datestr in dates.keys(): weechat.prnt(buffer, datestr) dt_events = dates[datestr] for event in dt_events: weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary'])) # ============================= # MAIN SCRIPT FUNCTIONS # ============================= def get_calendar(*args): result = [] try: events = gc_get_events() for event in events: start = event['start'].get('dateTime', event['start'].get('date')) result.append({ 'date': start, 'summary': event['summary'] }) except Exception as err: result = err return json.dumps(result) def get_calendar_callback(data, command, return_code, out, err): result = json.loads(out) buffer = buffer_get() update_gcal_buffer(buffer, result) # Notify if any events are happening in 10 minutes! 
if data == CALLED_FROM_TIMER: for event in result: #weechat.prnt(buffer, 'Handling event!') dt = datetime_parse(event['date']) now = datetime.now(tz=dt.tzinfo) timediff = dt - now minutes_remaining = math.ceil(timediff.total_seconds() / 60) #weechat.prnt(buffer, '{} - {} = {} ({} mins)'.format(dt, now, timediff, minutes_remaining)) # TODO Make minutes_remaining threshold configurable if minutes_remaining in NOTIFICATION_THRESHOLDS: msg = '[{}m] {}'.format(minutes_remaining, event['summary']) weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg) return weechat.WEECHAT_RC_OK def gcal_command(data, buffer, args): buffer = buffer_get() # TODO Implement init if args == 'init': pass else: weechat.hook_process( 'func:get_calendar', TIMEOUT_MS, 'get_calendar_callback', CALLED_FROM_CMD ) return weechat.WEECHAT_RC_OK def script_main(data, remaining_calls): # Weechat is single-threaded so a new process is created so other things aren't held up # if retrieving Google Calendar events doesn't return in a timely manner. # https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture weechat.hook_process( 'func:get_calendar', TIMEOUT_MS, 'get_calendar_callback', CALLED_FROM_TIMER ) return weechat.WEECHAT_RC_OK # Register the script on /script load # This needs to happen first! weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \ SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET) # Setup a command to initialize the Google Calendar authentication and show events in a buffer. weechat.hook_command( 'gcal', 'Displays events for today and tomorrow in a new buffer.', '[init]', ' || init - Initializes the items needed for this plugin to work.', '', 'gcal_command', '' ) # Check once per minute whether we should notify of imminent events weechat.hook_timer(60000, 60, 0, 'script_main', '')
normal
{ "blob_id": "0ed0fb6f9bcc768bb005222c9ae9b454f6d962ec", "index": 9148, "step-1": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\n<mask token>\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n", "step-3": "<mask token>\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n", "step-4": "from __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise 
SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n", "step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n# TODO: Add settings\n# minutes_remaining = [5, 10, 15]\n# notify_enabled = yes/no\n# time_format = '%H:%M' ???\n\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.'\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\n\nTIMEOUT_MS = 3000\n\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\n\nNOTIFICATION_THRESHOLDS = [5,15]\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\n\n# Where the weechat-gcal-token.pickle file is located\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n# =============================\n# GOOGLE CALENDAR FUNCTIONS\n# =============================\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a 
credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. ' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n tomorrow = datetime.combine( \\\n date.today() + timedelta(days=2), \\\n datetime.min.time()) \\\n .isoformat() + 'Z'\n\n #print('Getting the upcoming {} events between {} and {}'.format(num_events, now, tomorrow))\n events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=tomorrow,\n maxResults=num_events, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n# =============================\n# WEECHAT HELPER FUNCTIONS\n# =============================\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n\n return buffer\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n\n if datestr not in dates:\n dates[datestr] = []\n\n dates[datestr].append({\n 'time': timestr,\n 'summary': event['summary']\n })\n\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))\n\n# =============================\n# MAIN SCRIPT FUNCTIONS\n# =============================\n\ndef get_calendar(*args):\n result = []\n\n try:\n events = gc_get_events()\n\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({\n 'date': start,\n 'summary': event['summary']\n })\n except Exception as err:\n result = err\n\n return json.dumps(result)\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n\n # Notify if any events are happening in 10 minutes!\n if data == CALLED_FROM_TIMER:\n for event in result:\n #weechat.prnt(buffer, 'Handling event!')\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n\n #weechat.prnt(buffer, '{} - {} = {} ({} mins)'.format(dt, now, timediff, minutes_remaining))\n\n # TODO Make minutes_remaining threshold configurable\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n\n return weechat.WEECHAT_RC_OK\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n\n # TODO Implement init\n if args == 'init':\n pass\n else:\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_CMD\n )\n\n return weechat.WEECHAT_RC_OK\n\ndef script_main(data, remaining_calls):\n # Weechat is single-threaded so a new process is created so other things aren't held up\n # if retrieving Google Calendar events doesn't return in a timely manner.\n # https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_TIMER\n )\n\n return weechat.WEECHAT_RC_OK\n\n# Register the script on /script load\n# This needs to happen first!\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \\\n SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\n\n# Setup a command to initialize the Google Calendar authentication and show events in a buffer.\nweechat.hook_command(\n 'gcal',\n 'Displays events for today and tomorrow in a new buffer.',\n '[init]',\n ' || init - Initializes the items needed for this plugin to work.',\n '',\n 'gcal_command',\n ''\n)\n\n# Check once per minute whether we should notify of imminent events\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n", "step-ids": [ 7, 10, 11, 12, 13 ] }
[ 7, 10, 11, 12, 13 ]
#!/usr/bin/python -tt # snmp3_test # Claudia # PyCharm __author__ = "Claudia de Luna ([email protected])" __version__ = ": 1.0 $" __date__ = "10/23/16 11:25 AM" __copyright__ = "Copyright (c) 2015 Claudia de Luna" __license__ = "Python" #from __future__ import print_function import sys import snmp_helper # Provided main() calls the above functions def main(): # Take path argument and list all text files """ Test SNMPv3 script utilizing Kirks snmp_helper module """ ip = '10.1.10.100' a_user = 'cisco' auth_key = 'cisco123' encr_key = 'cisco123' snmp_user = (a_user, auth_key, encr_key) sw1 = (ip, 161) sysDescr = '1.3.6.1.2.1.1.1.0' sysObjectID = '1.3.6.1.2.1.1.2.0' sysUpTime = '1.3.6.1.2.1.1.3.0' sysContact = '1.3.6.1.2.1.1.4.0' sysNmae = '1.3.6.1.2.1.1.5.0' ifNumber = '1.3.6.1.2.1.2.1.0' # Uptime when running config last changed RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0' # Uptime when running config last saved (note any 'write' constitutes a save) RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0' # Uptime when startup config last saved StartLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0' ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1' ifName = '1.3.6.1.2.1.31.1.1.1.1.1' snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des') #print(snmp_data) # snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha', # encrypt_proto='aes128', display_errors=True): #snmp_extract(snmp_data): output = snmp_helper.snmp_extract(snmp_data) print output # Standard call to the main() function. if __name__ == '__main__': if len(sys.argv) != 1: #print '\nUsage: snmp3_test.py \nExample: python snmp3_test.py\n\n' sys.exit() else: main()
normal
{ "blob_id": "ccdae522983ddc7c02e221ab5c1bc32683358a7b", "index": 2883, "step-1": "#!/usr/bin/python -tt\n# snmp3_test\n# Claudia\n# PyCharm\n__author__ = \"Claudia de Luna ([email protected])\"\n__version__ = \": 1.0 $\"\n__date__ = \"10/23/16 11:25 AM\"\n__copyright__ = \"Copyright (c) 2015 Claudia de Luna\"\n__license__ = \"Python\"\n\n\n#from __future__ import print_function\nimport sys\nimport snmp_helper\n\n\n\n# Provided main() calls the above functions\ndef main():\n # Take path argument and list all text files\n \"\"\"\n Test SNMPv3 script utilizing Kirks snmp_helper module\n\n \"\"\"\n\n ip = '10.1.10.100'\n a_user = 'cisco'\n auth_key = 'cisco123'\n encr_key = 'cisco123'\n snmp_user = (a_user, auth_key, encr_key)\n sw1 = (ip, 161)\n\n sysDescr = '1.3.6.1.2.1.1.1.0'\n sysObjectID = '1.3.6.1.2.1.1.2.0'\n sysUpTime = '1.3.6.1.2.1.1.3.0'\n sysContact = '1.3.6.1.2.1.1.4.0'\n sysNmae = '1.3.6.1.2.1.1.5.0'\n ifNumber = '1.3.6.1.2.1.2.1.0'\n\n\n # Uptime when running config last changed\n RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'\n\n # Uptime when running config last saved (note any 'write' constitutes a save)\n RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'\n\n # Uptime when startup config last saved\n StartLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'\n\n ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1'\n ifName = '1.3.6.1.2.1.31.1.1.1.1.1'\n\n snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des')\n #print(snmp_data)\n\n # snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha',\n # encrypt_proto='aes128', display_errors=True):\n\n #snmp_extract(snmp_data):\n\n output = snmp_helper.snmp_extract(snmp_data)\n print output\n\n\n\n# Standard call to the main() function.\nif __name__ == '__main__':\n if len(sys.argv) != 1:\n #print '\\nUsage: snmp3_test.py \\nExample: python snmp3_test.py\\n\\n'\n sys.exit()\n else:\n main()\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Any object containing execute(self) method is considered to be IDE App # this is Duck typing concept class PyCharm: def execute(self): print("pycharm ide runnig") class MyIde: def execute(self): print("MyIde running") class Laptop: def code(self,ide): ide.execute() ide=MyIde() obj=Laptop() obj.code(ide)
normal
{ "blob_id": "9ab3dd87f17ac75a3831e9ec1f0746ad81fad70d", "index": 501, "step-1": "<mask token>\n\n\nclass MyIde:\n <mask token>\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n", "step-2": "class PyCharm:\n <mask token>\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n", "step-3": "class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\nobj.code(ide)\n", "step-4": "class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\nide = MyIde()\nobj = Laptop()\nobj.code(ide)\n", "step-5": "\r\n# Any object containing execute(self) method is considered to be IDE App\r\n# this is Duck typing concept\r\n\r\nclass PyCharm:\r\n def execute(self):\r\n print(\"pycharm ide runnig\")\r\n\r\nclass MyIde:\r\n def execute(self):\r\n print(\"MyIde running\")\r\n\r\nclass Laptop:\r\n\r\n def code(self,ide):\r\n ide.execute()\r\n\r\nide=MyIde()\r\n\r\nobj=Laptop()\r\n\r\nobj.code(ide)\r\n", "step-ids": [ 3, 5, 7, 8, 9 ] }
[ 3, 5, 7, 8, 9 ]
__version__ = '1.1.3rc0'
normal
{ "blob_id": "2e5bbc8c6a5eac2ed71c5d8619bedde2e04ee9a6", "index": 4932, "step-1": "<mask token>\n", "step-2": "__version__ = '1.1.3rc0'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import numpy #calculate field of simple def dipole(x, y, z, dx, dy, dz, mx, my, mz): R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2 return (3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5, 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5, 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5) #calculaion only one component of dipole def dipoleX(x, y, z, dx, dy, dz, mx, my, mz): R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2 return 3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5 def dipoleY(x, y, z, dx, dy, dz, mx, my, mz): R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2 return 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5 def dipoleZ(x, y, z, dx, dy, dz, mx, my, mz): R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2 return 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5 #calculate field caused by crack from array of coordinates and magntization of crack parts def crack(x,y,z,coordinates,magnetization): ret = numpy.array([0.0]*3) for it in range(len(coordinates)): ret+=numpy.array(dipole(x,y,z,coordinates[it][0],coordinates[it][1],coordinates[it][2],magnetization[it][0],magnetization[it][1],magnetization[it][2])) return ret #generator of crack parts coordinates and magntization def crackGenerator(funcCoord, funcMagn,crackLen = 30, paramBouns = [0,1]): coordinates = [] magnetization = [] for t in numpy.arange(paramBouns[0],paramBouns[1],(paramBouns[1]-paramBouns[0])/crackLen): coordinates.append(funcCoord(t)) magnetization.append(funcMagn(t)) return coordinates,magnetization #generates one random crack in volume vol def randomCrackExampleLinearModel(vol): sizeMax = (vol[3]/5,vol[4]/5,vol[5]/5) coordParams = numpy.random.rand(3,2) return crackGenerator(lambda t:(coordParams[0][0]*vol[3]+vol[0]+t*coordParams[0][1]*sizeMax[0], coordParams[1][0]*vol[4]+vol[1]+t*coordParams[1][1]*sizeMax[1], coordParams[2][0]*vol[5]+vol[2]+t*coordParams[2][1]*sizeMax[2]), lambda t: (0,0,10+numpy.random.rand()*t))
normal
{ "blob_id": "9d37d1618fb9d00d63b7ed58290c5ba1b8f106cd", "index": 4599, "step-1": "import numpy \n\n#calculate field of simple \ndef dipole(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return (3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5,\n 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5,\n 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5)\n#calculaion only one component of dipole \ndef dipoleX(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5\ndef dipoleY(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5\ndef dipoleZ(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5\n\n#calculate field caused by crack from array of coordinates and magntization of crack parts\ndef crack(x,y,z,coordinates,magnetization):\n ret = numpy.array([0.0]*3)\n for it in range(len(coordinates)):\n ret+=numpy.array(dipole(x,y,z,coordinates[it][0],coordinates[it][1],coordinates[it][2],magnetization[it][0],magnetization[it][1],magnetization[it][2]))\n return ret\n\n#generator of crack parts coordinates and magntization \ndef crackGenerator(funcCoord, funcMagn,crackLen = 30, paramBouns = [0,1]):\n coordinates = []\n magnetization = []\n for t in numpy.arange(paramBouns[0],paramBouns[1],(paramBouns[1]-paramBouns[0])/crackLen):\n coordinates.append(funcCoord(t))\n magnetization.append(funcMagn(t))\n return coordinates,magnetization\n\n#generates one random crack in volume vol\ndef randomCrackExampleLinearModel(vol):\n sizeMax = (vol[3]/5,vol[4]/5,vol[5]/5)\n coordParams = numpy.random.rand(3,2)\n return crackGenerator(lambda t:(coordParams[0][0]*vol[3]+vol[0]+t*coordParams[0][1]*sizeMax[0],\n coordParams[1][0]*vol[4]+vol[1]+t*coordParams[1][1]*sizeMax[1],\n coordParams[2][0]*vol[5]+vol[2]+t*coordParams[2][1]*sizeMax[2]),\n lambda t: (0,0,10+numpy.random.rand()*t))", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import pymysql db= pymysql.connect(host = 'localhost', port = 3306, user = 'root', password = 'Wubaba950823', database = 'mydb', charset = 'utf8mb4' ) # 使用cursor()方法获取操作游标 cursor = db.cursor() # SQL 插入语句 里面的数据类型要对应 sql = "INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')" % ('test3','经典','2019/12/14') print(sql) try: # 执行sql语句 cursor.execute(sql) # 执行sql语句 db.commit() except: # 发生错误时回滚 db.rollback() # 关闭数据库连接 db.close()
normal
{ "blob_id": "8566e30a6450a72a0e441155321bd03363944b5a", "index": 8236, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n", "step-3": "<mask token>\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n", "step-4": "import pymysql\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n", "step-5": "import pymysql\n\ndb= pymysql.connect(host = 'localhost',\n port = 3306,\n user = 'root',\n password = 'Wubaba950823',\n database = 'mydb',\n charset = 'utf8mb4'\n )\n \n# 使用cursor()方法获取操作游标 \ncursor = db.cursor()\n\n# SQL 插入语句 里面的数据类型要对应\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3','经典','2019/12/14')\nprint(sql)\ntry:\n # 执行sql语句\n cursor.execute(sql)\n # 执行sql语句\n db.commit()\nexcept:\n # 发生错误时回滚\n db.rollback()\n \n# 关闭数据库连接\ndb.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from day19.rules import Rule, CharacterMatch, OrRule, ListRule


def parse_rule(rules: dict, rule_str: str = None) -> Rule:
    if rule_str is None:
        rule_str: str = rules[0]
    if '"' in rule_str:
        return CharacterMatch(rule_str.strip('"'))
    elif '|' in rule_str:
        or_rules = [parse_rule(rules, part.strip()) for part in rule_str.split('|')]
        return OrRule(*or_rules)
    elif ' ' in rule_str:
        and_rules = [parse_rule(rules, part.strip()) for part in rule_str.split(' ')]
        return ListRule(*and_rules)
    elif rule_str.strip().isnumeric():
        return parse_rule(rules, rules.get(int(rule_str)))
    else:
        print(f'WUT? {rule_str}')


if __name__ == '__main__':
    with open('input.txt', 'rt') as puzzle:
        rules = dict()
        while True:
            line = puzzle.readline().strip()
            if not line:
                break
            number, rule = line.split(': ')
            rules[int(number)] = rule
        inputs = []
        while True:
            line = puzzle.readline().strip()
            if not line:
                break
            inputs.append(line)
        rule = parse_rule(rules)
        matches = sum([is_match for is_match, left in [rule.match(_input) for _input in inputs] if not left])
        print(f'number of matching messages: {matches}')
normal
{ "blob_id": "4d4f7db6d5b4ed7eac3ced73aca76d3c952c84f4", "index": 1456, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_rule(rules: dict, rule_str: str=None) ->Rule:\n if rule_str is None:\n rule_str: str = rules[0]\n if '\"' in rule_str:\n return CharacterMatch(rule_str.strip('\"'))\n elif '|' in rule_str:\n or_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split('|')]\n return OrRule(*or_rules)\n elif ' ' in rule_str:\n and_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split(' ')]\n return ListRule(*and_rules)\n elif rule_str.strip().isnumeric():\n return parse_rule(rules, rules.get(int(rule_str)))\n else:\n print(f'WUT? {rule_str}')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef parse_rule(rules: dict, rule_str: str=None) ->Rule:\n if rule_str is None:\n rule_str: str = rules[0]\n if '\"' in rule_str:\n return CharacterMatch(rule_str.strip('\"'))\n elif '|' in rule_str:\n or_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split('|')]\n return OrRule(*or_rules)\n elif ' ' in rule_str:\n and_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split(' ')]\n return ListRule(*and_rules)\n elif rule_str.strip().isnumeric():\n return parse_rule(rules, rules.get(int(rule_str)))\n else:\n print(f'WUT? {rule_str}')\n\n\nif __name__ == '__main__':\n with open('input.txt', 'rt') as puzzle:\n rules = dict()\n while True:\n line = puzzle.readline().strip()\n if not line:\n break\n number, rule = line.split(': ')\n rules[int(number)] = rule\n inputs = []\n while True:\n line = puzzle.readline().strip()\n if not line:\n break\n inputs.append(line)\n rule = parse_rule(rules)\n matches = sum([is_match for is_match, left in [rule.match(_input) for\n _input in inputs] if not left])\n print(f'number of matching messages: {matches}')\n", "step-4": "from day19.rules import Rule, CharacterMatch, OrRule, ListRule\n\n\ndef parse_rule(rules: dict, rule_str: str=None) ->Rule:\n if rule_str is None:\n rule_str: str = rules[0]\n if '\"' in rule_str:\n return CharacterMatch(rule_str.strip('\"'))\n elif '|' in rule_str:\n or_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split('|')]\n return OrRule(*or_rules)\n elif ' ' in rule_str:\n and_rules = [parse_rule(rules, part.strip()) for part in rule_str.\n split(' ')]\n return ListRule(*and_rules)\n elif rule_str.strip().isnumeric():\n return parse_rule(rules, rules.get(int(rule_str)))\n else:\n print(f'WUT? {rule_str}')\n\n\nif __name__ == '__main__':\n with open('input.txt', 'rt') as puzzle:\n rules = dict()\n while True:\n line = puzzle.readline().strip()\n if not line:\n break\n number, rule = line.split(': ')\n rules[int(number)] = rule\n inputs = []\n while True:\n line = puzzle.readline().strip()\n if not line:\n break\n inputs.append(line)\n rule = parse_rule(rules)\n matches = sum([is_match for is_match, left in [rule.match(_input) for\n _input in inputs] if not left])\n print(f'number of matching messages: {matches}')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as colors
import matplotlib.cm as cm


def plot_hist(data_list):
    plt.hist(data_list, bins=500)
    plt.show()
    return


def compare_hits_plot(np_array, compare=False):
    if compare:
        clist = list(np_array[:, 2])
        minima, maxima = min(clist), max(clist)
        print(minima, maxima)
        hits = np_array[np_array[:, 2] == 1]
        total_hits = np_array[np_array[:, 2] >= 1]
        scatter = plt.scatter(np_array[:, 3], np_array[:, 1], c=clist, vmin=0, vmax=1, s=8, cmap=cm.winter)
        plt.ylim(ymin=0, ymax=max(hits[:, 3]))
        plt.colorbar(scatter)
        # spot_count_cutoff is assumed to be a module-level threshold defined elsewhere in the project
        plt.axhline(spot_count_cutoff)
    else:
        scatter = plt.scatter(np_array[:, 3], np_array[:, 1])


def pickle_ratio_plot(np_array):
    clist = list(np_array[:, 5])
    minima, maxima = min(clist), max(clist)
    print(minima, maxima)
    scatter = plt.scatter(np_array[:, 1], np_array[:, 2], c=clist, s=8, cmap=cm.winter)
    plt.colorbar(scatter)
    plt.axhline(spot_count_cutoff)  # same assumed module-level threshold
normal
{ "blob_id": "b6adb956aed934451fc21e51663be36d08c5b645", "index": 2535, "step-1": "import matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\n\ndef plot_hist(data_list):\n plt.hist(data_list, bins=500)\n plt.show()\n return\n\ndef compare_hits_plot(np_array, compare=False):\n if compare:\n clist = list(np_array[:,2])\n minima, maxima = min(clist), max(clist)\n print minima, maxima\n hits=np_array[np_array[:,2]==1]\n total_hits=np_array[np_array[:,2]>=1]\n scatter = plt.scatter(np_array[:,3], np_array[:,1], c=clist, vmin=0, vmax=1, s=8, cmap=cm.winter)\n plt.ylim(ymin=0, ymax=max(hits[:,3]))\n plt.colorbar(scatter)\n plt.axhline(spot_count_cutoff)\n else:\n scatter = plt.scatter(np_array[:,3], np_array[:,1])\n\n\ndef pickle_ratio_plot(np_array):\n clist = list(np_array[:,5])\n minima, maxima = min(clist), max(clist)\n print minima, maxima\n scatter = plt.scatter(np_array[:,1], np_array[:,2], c=clist, s=8, cmap=cm.winter)\n plt.colorbar(scatter)\n plt.axhline(spot_count_cutoff)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import MySQLdb
import MySQLdb.cursors
from flask import _app_ctx_stack, current_app


class MySQL(object):

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Initialize the `app` for use with this
        :class:`~flask_mysqldb.MySQL` class.
        This is called automatically if `app` is passed to
        :meth:`~MySQL.__init__`.

        :param flask.Flask app: the application to configure for use with
            this :class:`~flask_mysqldb.MySQL` class.
        """
        app.config.setdefault('MYSQL_HOST', 'localhost')
        app.config.setdefault('MYSQL_USER', None)
        app.config.setdefault('MYSQL_PASSWORD', None)
        app.config.setdefault('MYSQL_DB', None)
        app.config.setdefault('MYSQL_PORT', 3306)
        app.config.setdefault('MYSQL_UNIX_SOCKET', None)
        app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)
        app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)
        app.config.setdefault('MYSQL_USE_UNICODE', True)
        app.config.setdefault('MYSQL_CHARSET', 'utf8')
        app.config.setdefault('MYSQL_SQL_MODE', None)
        app.config.setdefault('MYSQL_CURSORCLASS', None)

        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)

    @property
    def connect(self):
        kwargs = {}

        if current_app.config['MYSQL_HOST']:
            kwargs['host'] = current_app.config['MYSQL_HOST']
        if current_app.config['MYSQL_USER']:
            kwargs['user'] = current_app.config['MYSQL_USER']
        if current_app.config['MYSQL_PASSWORD']:
            kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']
        if current_app.config['MYSQL_DB']:
            kwargs['db'] = current_app.config['MYSQL_DB']
        if current_app.config['MYSQL_PORT']:
            kwargs['port'] = current_app.config['MYSQL_PORT']
        if current_app.config['MYSQL_UNIX_SOCKET']:
            kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']
        if current_app.config['MYSQL_CONNECT_TIMEOUT']:
            kwargs['connect_timeout'] = current_app.config['MYSQL_CONNECT_TIMEOUT']
        if current_app.config['MYSQL_READ_DEFAULT_FILE']:
            kwargs['read_default_file'] = current_app.config['MYSQL_READ_DEFAULT_FILE']
        if current_app.config['MYSQL_USE_UNICODE']:
            kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']
        if current_app.config['MYSQL_CHARSET']:
            kwargs['charset'] = current_app.config['MYSQL_CHARSET']
        if current_app.config['MYSQL_SQL_MODE']:
            kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']
        if current_app.config['MYSQL_CURSORCLASS']:
            kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.config['MYSQL_CURSORCLASS'])

        return MySQLdb.connect(**kwargs)

    @property
    def connection(self):
        """Attempts to connect to the MySQL server.

        :return: Bound MySQL connection object if successful or ``None`` if
            unsuccessful.
        """
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'mysql_db'):
                ctx.mysql_db = self.connect
            return ctx.mysql_db

    def teardown(self, exception):
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'mysql_db'):
            ctx.mysql_db.close()
normal
{ "blob_id": "db8c2f6f5da0b52c268634043e1132984f610eed", "index": 8405, "step-1": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n <mask token>\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n <mask token>\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n", "step-2": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n 
kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n <mask token>\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n", "step-3": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n ctx = _app_ctx_stack.top\n if 
ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n", "step-4": "import MySQLdb\nimport MySQLdb.cursors\nfrom flask import _app_ctx_stack, current_app\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n", "step-5": "import MySQLdb\nimport MySQLdb.cursors\nfrom flask import _app_ctx_stack, current_app\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n 
:class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = \\\n current_app.config['MYSQL_CONNECT_TIMEOUT']\n\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = \\\n current_app.config['MYSQL_READ_DEFAULT_FILE']\n\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.config['MYSQL_CURSORCLASS'])\n\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
for i in range(-10, 0):
    print(i, end=" ")
normal
{ "blob_id": "8d0fcf0bf5effec9aa04e7cd56b4b7098c6713cb", "index": 70, "step-1": "<mask token>\n", "step-2": "for i in range(-10, 0):\n print(i, end=' ')\n", "step-3": "for i in range(-10,0):\n print(i,end=\" \")", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# Stubs for binascii

# Based on http://docs.python.org/3.2/library/binascii.html

import sys
from typing import Union, Text

if sys.version_info < (3,):
    # Python 2 accepts unicode ascii pretty much everywhere.
    _Bytes = Text
    _Ascii = Text
else:
    # But since Python 3.3 ASCII-only unicode strings are accepted by the
    # a2b_* functions.
    _Bytes = bytes
    _Ascii = Union[bytes, str]

def a2b_uu(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 7):
    def b2a_uu(data: _Bytes, *, backtick: bool = ...) -> bytes: ...
else:
    def b2a_uu(data: _Bytes) -> bytes: ...
def a2b_base64(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 6):
    def b2a_base64(data: _Bytes, *, newline: bool = ...) -> bytes: ...
else:
    def b2a_base64(data: _Bytes) -> bytes: ...
def a2b_qp(string: _Ascii, header: bool = ...) -> bytes: ...
def b2a_qp(data: _Bytes, quotetabs: bool = ..., istext: bool = ..., header: bool = ...) -> bytes: ...
def a2b_hqx(string: _Ascii) -> bytes: ...
def rledecode_hqx(data: _Bytes) -> bytes: ...
def rlecode_hqx(data: _Bytes) -> bytes: ...
def b2a_hqx(data: _Bytes) -> bytes: ...
def crc_hqx(data: _Bytes, crc: int) -> int: ...
def crc32(data: _Bytes, crc: int = ...) -> int: ...
def b2a_hex(data: _Bytes) -> bytes: ...
def hexlify(data: _Bytes) -> bytes: ...
def a2b_hex(hexstr: _Ascii) -> bytes: ...
def unhexlify(hexlify: _Ascii) -> bytes: ...

class Error(Exception): ...
class Incomplete(Exception): ...
normal
{ "blob_id": "9ba74c7ecbd20c59883aff4efdc7e0369ff65daf", "index": 5267, "step-1": "<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\n<mask token>\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n", "step-2": "<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n", "step-3": "<mask token>\n\n\ndef a2b_uu(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n", "step-4": "<mask token>\nif sys.version_info < (3,):\n _Bytes = Text\n _Ascii = Text\nelse:\n _Bytes = bytes\n _Ascii = Union[bytes, str]\n\n\ndef a2b_uu(string: _Ascii) ->bytes:\n ...\n\n\nif sys.version_info >= (3, 7):\n\n def b2a_uu(data: _Bytes, *, backtick: bool=...) ->bytes:\n ...\nelse:\n\n def b2a_uu(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\nif sys.version_info >= (3, 6):\n\n def b2a_base64(data: _Bytes, *, newline: bool=...) ->bytes:\n ...\nelse:\n\n def b2a_base64(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) 
->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n", "step-5": "# Stubs for binascii\n\n# Based on http://docs.python.org/3.2/library/binascii.html\n\nimport sys\nfrom typing import Union, Text\n\nif sys.version_info < (3,):\n # Python 2 accepts unicode ascii pretty much everywhere.\n _Bytes = Text\n _Ascii = Text\nelse:\n # But since Python 3.3 ASCII-only unicode strings are accepted by the\n # a2b_* functions.\n _Bytes = bytes\n _Ascii = Union[bytes, str]\n\ndef a2b_uu(string: _Ascii) -> bytes: ...\nif sys.version_info >= (3, 7):\n def b2a_uu(data: _Bytes, *, backtick: bool = ...) -> bytes: ...\nelse:\n def b2a_uu(data: _Bytes) -> bytes: ...\ndef a2b_base64(string: _Ascii) -> bytes: ...\nif sys.version_info >= (3, 6):\n def b2a_base64(data: _Bytes, *, newline: bool = ...) -> bytes: ...\nelse:\n def b2a_base64(data: _Bytes) -> bytes: ...\ndef a2b_qp(string: _Ascii, header: bool = ...) -> bytes: ...\ndef b2a_qp(data: _Bytes, quotetabs: bool = ..., istext: bool = ..., header: bool = ...) -> bytes: ...\ndef a2b_hqx(string: _Ascii) -> bytes: ...\ndef rledecode_hqx(data: _Bytes) -> bytes: ...\ndef rlecode_hqx(data: _Bytes) -> bytes: ...\ndef b2a_hqx(data: _Bytes) -> bytes: ...\ndef crc_hqx(data: _Bytes, crc: int) -> int: ...\ndef crc32(data: _Bytes, crc: int = ...) -> int: ...\ndef b2a_hex(data: _Bytes) -> bytes: ...\ndef hexlify(data: _Bytes) -> bytes: ...\ndef a2b_hex(hexstr: _Ascii) -> bytes: ...\ndef unhexlify(hexlify: _Ascii) -> bytes: ...\n\nclass Error(Exception): ...\nclass Incomplete(Exception): ...\n", "step-ids": [ 13, 15, 16, 17, 19 ] }
[ 13, 15, 16, 17, 19 ]
a = int(raw_input())
if (a % 2) == 0:
    print("Even")
else:
    print("Odd")
normal
{ "blob_id": "00b06b5e6465bae3eab336441b283a9831bb93c0", "index": 4531, "step-1": "<mask token>\n", "step-2": "<mask token>\nif a % 2 == 0:\n print('Even')\nelse:\n print('Odd')\n", "step-3": "a = int(raw_input())\nif a % 2 == 0:\n print('Even')\nelse:\n print('Odd')\n", "step-4": "a=int(raw_input())\nif (a%2)==0:\n\tprint(\"Even\")\nelse:\n\tprint(\"Odd\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from datetime import datetime
import statsmodels.api as sm

from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.utility_fun.factor_preprocess import FactorPreProcess
from quant.utility_fun.write_excel import WriteExcel


def factor_neutral(factor_series, neutral_frame):

    """
    Neutralize the factor series against the given exposures (OLS residual)
    """

    concat_data = pd.concat([factor_series, neutral_frame], axis=1)
    concat_data = concat_data.dropna()

    factor_val = concat_data.ix[:, 0]
    neutral_val = concat_data.ix[:, 1:]

    model = sm.OLS(factor_val.values, neutral_val.values)
    regress = model.fit()

    params = regress.params
    params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
    factor_res = factor_val - regress.predict(neutral_val)

    return params, factor_res


def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):

    # param
    ###############################################################################################################
    group_number = 8
    year_trade_days = 242
    min_stock_number = 100
    out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'

    alpha_remove_extreme_value = True   # winsorize the alpha factor
    alpha_standard = True               # standardize the alpha factor
    alpha_industry_neutral = True       # industry-neutralize the alpha factor
    alpha_barra_style_neutral = True    # Barra-style-neutralize the alpha factor

    # read data
    ###############################################################################################################
    price = Stock().get_factor_h5("PriceCloseAdjust", None, "alpha_dfc")
    alpha_val = Stock().get_factor_h5(factor_name, None, "alpha_dfc")
    industry = Stock().get_factor_h5("industry_citic1", None, "primary_mfc")
    industry = industry.applymap(lambda x: x.decode('utf-8'))
    [alpha_val, industry] = FactorPreProcess().make_same_index_columns([alpha_val, industry])

    if alpha_barra_style_neutral:

        size = Stock().get_factor_h5("NORMAL_CNE5_SIZE", None, 'barra_risk_dfc')
        beta = Stock().get_factor_h5("NORMAL_CNE5_BETA", None, 'barra_risk_dfc')
        nolin_size = Stock().get_factor_h5("NORMAL_CNE5_NON_LINEAR_SIZE", None, 'barra_risk_dfc')
        momentum = Stock().get_factor_h5("NORMAL_CNE5_MOMENTUM", None, 'barra_risk_dfc')

        [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([size, beta, nolin_size])
        beg_date = max(beg_date, price.columns[0], alpha_val.columns[0], beta.columns[0])
        end_date = min(end_date, price.columns[-1], alpha_val.columns[-1], beta.columns[-1])
    else:
        beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
        end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])

    date_series = Date().get_trade_date_series(beg_date, end_date, period=cal_period)
    date_series = list(set(date_series) & set(alpha_val.columns))
    date_series.sort()

    # pre process data
    ###############################################################################################################
    if alpha_remove_extreme_value:
        alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
    if alpha_standard:
        alpha_val = FactorPreProcess().standardization(alpha_val)

    # cal everyday
    ###############################################################################################################
    alpha_return = pd.DataFrame([], index=date_series)
    alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)

    for i_date in range(len(date_series) - 2):

        cur_cal_date = date_series[i_date]
        next_cal_date = date_series[i_date + 1]
        buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
        sell_date = Date().get_trade_date_offset(next_cal_date, 1)
        print(" Calculating Factor %s Alpha Return At %s" % (factor_name, cur_cal_date))

        alpha_return.index.name = 'CalDate'
        alpha_return.ix[cur_cal_date, "BuyDate"] = buy_date
        alpha_return.ix[cur_cal_date, "SellDate"] = sell_date

        alpha_date = alpha_val[cur_cal_date]
        buy_price = price[buy_date]
        sell_price = price[sell_date]
        pct_date = sell_price / buy_price - 1.0

        if alpha_industry_neutral:

            try:
                industry_date = industry[cur_cal_date]
                industry_dummy = pd.get_dummies(industry_date)
            except:
                continue

            if len(pd.concat([alpha_date, industry_date], axis=1).dropna()) < min_stock_number:
                continue
            else:
                params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=industry_dummy)
                alpha_date = factor_res
                alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
                alpha_date = FactorPreProcess().standardization(alpha_date)

        if alpha_barra_style_neutral:

            try:
                size_date = size[cur_cal_date]
                beta_date = beta[cur_cal_date]
                nolin_size_date = nolin_size[cur_cal_date]
                momentum_date = momentum[cur_cal_date]
            except:
                continue

            if len(pd.concat([alpha_date, size_date], axis=1).dropna()) < min_stock_number:
                continue
            else:
                barra_risk_exposure = pd.concat([beta_date, size_date, nolin_size_date, momentum_date], axis=1)
                barra_risk_exposure.columns = ['beta', 'size', 'nolin_size', 'momentum']
                params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=barra_risk_exposure)
                alpha_date = factor_res
                alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
                alpha_date = FactorPreProcess().standardization(alpha_date)

        alpha_exposure.ix[cur_cal_date, :] = alpha_date

        res = pd.concat([alpha_date, pct_date], axis=1)
        res.columns = ['alpha_val', 'period_pct']
        res = res.dropna()

        res = res.sort_values(by=['alpha_val'], ascending=False)
        labels = ["group_" + str(i) for i in list(range(1, group_number + 1))]
        res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=labels)

        period_return = (res['alpha_val'] * res['period_pct']).mean()
        alpha_return.ix[cur_cal_date, "FactorReturn"] = period_return
        information_correlation = res['alpha_val'].corr(res['period_pct'])
        alpha_return.ix[cur_cal_date, "IC"] = information_correlation

        group_pct = res.groupby(by=['group'])['period_pct'].mean()
        for i_label in range(len(labels)):
            alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[i_label]

    alpha_return = alpha_return.dropna(subset=['FactorReturn'])
    alpha_return["CumFactorReturn"] = alpha_return['FactorReturn'].cumsum()
    cum_labels = ["Cum_" + str(x) for x in labels]
    alpha_return[cum_labels] = alpha_return[labels].cumsum()

    # plot
    ###############################################################################################################
    # plt_col = []
    # plt_col.append("CumFactorReturn")
    # plt_col.extend(cum_labels)
    # alpha_return[plt_col].plot()
    # plt.title(factor_name)
    # plt.show()

    # describe annual
    ###############################################################################################################
    back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
    back_test_end_date = Date().get_trade_date_offset(date_series[len(date_series) - 1], 1)
    back_test_days = Date().get_trade_date_diff(back_test_beg_date, back_test_end_date)
    backtest_year = back_test_days / year_trade_days

    alpha_return['year'] = alpha_return.index.map(lambda x: datetime.strptime(x, "%Y%m%d").year)
    year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum()
    year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
    year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
    year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
    year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()

    year_describe = pd.concat([year_factor_return, year_count, year_ic_mean, year_ic_std, year_gp_mean], axis=1)
    col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
    col.extend(labels)
    year_describe.columns = col
    year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'] / year_describe['Count'] * year_count
    year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'] * np.sqrt(50)

    year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return["CumFactorReturn"].values[-1] / backtest_year
    year_describe.ix['Sum', 'IC_IR'] = alpha_return["IC"].mean() / alpha_return["IC"].std() * np.sqrt(50)
    year_describe.ix['Sum', 'IC_mean'] = alpha_return["IC"].mean()
    year_describe.ix['Sum', 'IC_std'] = alpha_return["IC"].std()
    year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
    year_describe.index = year_describe.index.map(str)

    for i in range(len(year_describe)):
        year = year_describe.index[i]
        corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index=labels, columns=['group_return'])
        corr_pd['group_number'] = (list(range(1, group_number + 1)))
        year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]

    # save data
    ###############################################################################################################

    # alpha_exposure_neutral
    ###############################################################################################################
    alpha_exposure = alpha_exposure.astype(np.float)
    filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name + "_FactorExposureNeutral.csv")
    alpha_exposure.T.to_csv(filename)

    # exposure_corr
    ###############################################################################################################
    exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=['Exposure_Corr'])

    for i_date in range(1, len(alpha_exposure.index)):

        last_exposure_date = alpha_exposure.index[i_date - 1]
        cur_exposure_date = alpha_exposure.index[i_date]
        exposure_adjoin = alpha_exposure.ix[last_exposure_date:cur_exposure_date, :]
        exposure_adjoin = exposure_adjoin.T.dropna()
        exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'] = exposure_adjoin.corr().ix[0, 1]

    exposure_corr = exposure_corr.dropna()
    exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'].mean()
    filename = os.path.join(out_path, 'alpha_exposure_stability', factor_name + "_FactorExposureCorr.csv")
    exposure_corr.to_csv(filename)

    # Factor Return
    ###############################################################################################################
    filename = os.path.join(out_path, 'alpha_return', factor_name + "_FactorReturn.xlsx")
    sheet_name = "FactorReturn"
    we = WriteExcel(filename)
    ws = we.add_worksheet(sheet_name)

    num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=['format'])
    num_format_pd.ix['format', :] = '0.00%'
    num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'

    we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number=1,
                    num_format_pd=num_format_pd, color="blue", fillna=True)

    num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=['format'])
    num_format_pd.ix['format', :] = '0.00%'
    num_format_pd.ix['format', ['year']] = '0'

    we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=2 + len(year_describe.columns),
                    num_format_pd=num_format_pd, color="blue", fillna=True)
    we.close()
    ###############################################################################################################


if __name__ == '__main__':

    cal_period = "W"
    beg_date = "20040101"
    end_date = datetime.today().strftime("%Y%m%d")

    path = "E:\\3_Data\\5_stock_data\\3_alpha_model\\"
    file = "MyAlpha.xlsx"
    data = pd.read_excel(os.path.join(path, file), encoding='gbk')
    # the column names and flag value below ('计算因子收益率', '因子名', '是') must match the spreadsheet headers
    data = data[data['计算因子收益率'] == "是"]
    data = data.reset_index(drop=True)

    for i in range(0, len(data)):

        factor_name = data.ix[i, "因子名"]
        print("#################### Start calculating factor return for %s ####################" % factor_name)
        cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)
        print("#################### Finished calculating factor return for %s ####################" % factor_name)
normal
{ "blob_id": "1d0730e8fd120e1c4bc5b89cbd766234e1fa3bca", "index": 2197, "step-1": "<mask token>\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = 
pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', 
factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef factor_neutral(factor_series, neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, 
price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = 
[('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], 
columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef factor_neutral(factor_series, neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = 
industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 
'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\nif __name__ == '__main__':\n cal_period = 'W'\n beg_date = '20040101'\n end_date = datetime.today().strftime('%Y%m%d')\n path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n file = 'MyAlpha.xlsx'\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == '是']\n data = data.reset_index(drop=True)\n for i in range(0, len(data)):\n factor_name = data.ix[i, '因子名']\n print('#################### 开始计算因子收益率 %s 数据 ####################' %\n factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print('#################### 结束计算因子收益率 %s 数据 ####################' %\n factor_name)\n", "step-4": "import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport statsmodels.api as sm\nfrom quant.stock.stock import Stock\nfrom quant.stock.date import Date\nfrom quant.utility_fun.factor_preprocess import FactorPreProcess\nfrom quant.utility_fun.write_excel import WriteExcel\n\n\ndef factor_neutral(factor_series, 
neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = 
FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in 
range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\nif __name__ == '__main__':\n cal_period = 'W'\n beg_date = '20040101'\n end_date = datetime.today().strftime('%Y%m%d')\n path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n file = 'MyAlpha.xlsx'\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == '是']\n data = data.reset_index(drop=True)\n for i in range(0, len(data)):\n factor_name = data.ix[i, '因子名']\n print('#################### 开始计算因子收益率 %s 数据 ####################' %\n factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print('#################### 结束计算因子收益率 %s 数据 ####################' %\n factor_name)\n", "step-5": "import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport statsmodels.api as sm\nfrom quant.stock.stock import Stock\nfrom quant.stock.date import Date\nfrom quant.utility_fun.factor_preprocess import FactorPreProcess\nfrom quant.utility_fun.write_excel import WriteExcel\n\n\ndef factor_neutral(factor_series, neutral_frame):\n\n \"\"\"\n 中性化\n \"\"\"\n\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n 
factor_res = factor_val - regress.predict(neutral_val)\n\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n\n # param\n ###############################################################################################################\n ###############################################################################################################\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n\n alpha_remove_extreme_value = True # alpha 因子 取极值\n alpha_standard = True # alpha 因子 标准化\n alpha_industry_neutral = True # alpha 因子 行业中性\n alpha_barra_style_neutral = True # alpha 因子 风格中性\n\n # read data\n ###############################################################################################################\n ###############################################################################################################\n price = Stock().get_factor_h5(\"PriceCloseAdjust\", None, \"alpha_dfc\")\n alpha_val = Stock().get_factor_h5(factor_name, None, \"alpha_dfc\")\n industry = Stock().get_factor_h5(\"industry_citic1\", None, \"primary_mfc\")\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n \n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([alpha_val, industry])\n \n if alpha_barra_style_neutral:\n \n size = Stock().get_factor_h5(\"NORMAL_CNE5_SIZE\", None, 'barra_risk_dfc')\n beta = Stock().get_factor_h5(\"NORMAL_CNE5_BETA\", None, 'barra_risk_dfc')\n nolin_size = Stock().get_factor_h5(\"NORMAL_CNE5_NON_LINEAR_SIZE\", None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5(\"NORMAL_CNE5_MOMENTUM\", None, 'barra_risk_dfc')\n\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0], beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1], beta.columns[-1])\n\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n\n date_series = Date().get_trade_date_series(beg_date, end_date, period=cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n\n # pre process data\n ###############################################################################################################\n ###############################################################################################################\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n\n # cal everyday\n ###############################################################################################################\n ###############################################################################################################\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n\n for i_date in range(len(date_series) - 2):\n\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(\" Calculating Factor %s Alpha Return At %s\" % (factor_name, cur_cal_date))\n\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, \"BuyDate\"] = buy_date\n 
alpha_return.ix[cur_cal_date, \"SellDate\"] = sell_date\n\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n\n if alpha_industry_neutral:\n\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n\n if alpha_barra_style_neutral:\n\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size', 'momentum']\n params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n\n labels = [\"group_\" + str(i) for i in list(range(1, group_number + 1))]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=labels)\n\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, \"FactorReturn\"] = period_return\n\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, \"IC\"] = information_correlation\n\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[i_label]\n\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return[\"CumFactorReturn\"] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [\"Cum_\" + str(x) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n\n # plot\n ###############################################################################################################\n ###############################################################################################################\n # plt_col = []\n # plt_col.append(\"CumFactorReturn\")\n # plt_col.extend(cum_labels)\n # alpha_return[plt_col].plot()\n # plt.title(factor_name)\n # plt.show()\n\n # describe annual\n ###############################################################################################################\n ###############################################################################################################\n\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date, back_test_end_date)\n\n backtest_year = back_test_days / year_trade_days\n\n alpha_return['year'] = alpha_return.index.map(lambda x: 
datetime.strptime(x, \"%Y%m%d\").year)\n\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum()\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean, year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'] * np.sqrt(50)\n\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\"CumFactorReturn\"].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return[\"IC\"].mean() / alpha_return[\"IC\"].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return[\"IC\"].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return[\"IC\"].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index=labels, columns=['group_return'])\n corr_pd['group_number'] = (list(range(1, group_number+1)))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n\n # save data\n ###############################################################################################################\n ###############################################################################################################\n\n # alpha_exposure_neutral\n ###############################################################################################################\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name + \"_FactorExposureNeutral.csv\")\n alpha_exposure.T.to_csv(filename)\n\n # exposure_corr\n ###############################################################################################################\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=['Exposure_Corr'])\n\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date-1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'] = exposure_adjoin.corr().ix[0, 1]\n\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', factor_name + \"_FactorExposureCorr.csv\")\n exposure_corr.to_csv(filename)\n\n # Factor Return\n ###############################################################################################################\n filename = os.path.join(out_path, 'alpha_return', factor_name + \"_FactorReturn.xlsx\")\n sheet_name = \"FactorReturn\"\n\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=['format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n 
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number=1,\n num_format_pd=num_format_pd, color=\"blue\", fillna=True)\n\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=['format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=2+len(year_describe.columns),\n num_format_pd=num_format_pd, color=\"blue\", fillna=True)\n we.close()\n ###############################################################################################################\n\n\nif __name__ == '__main__':\n\n cal_period = \"W\"\n beg_date = \"20040101\"\n end_date = datetime.today().strftime(\"%Y%m%d\")\n\n path = \"E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\\"\n file = \"MyAlpha.xlsx\"\n\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == \"是\"]\n data = data.reset_index(drop=True)\n\n for i in range(0, len(data)):\n\n factor_name = data.ix[i, \"因子名\"]\n print(\"#################### 开始计算因子收益率 %s 数据 ####################\" % factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print(\"#################### 结束计算因子收益率 %s 数据 ####################\" % factor_name)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
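The factor-return record above neutralizes each alpha cross-section by regressing it on industry dummies and Barra style exposures and keeping the OLS residual; `factor_neutral` in the record implements this with statsmodels. A minimal stand-alone sketch of the same step, written against current pandas/statsmodels APIs instead of the deprecated `.ix` indexer — the tickers and industry labels below are illustrative assumptions, not values from the original data:

import pandas as pd
import statsmodels.api as sm

def neutralize(factor, exposures):
    # Keep only stocks that have both a factor value and complete exposures.
    merged = pd.concat([factor, exposures], axis=1).dropna()
    y = merged.iloc[:, 0]
    X = merged.iloc[:, 1:]
    # The OLS residual is the industry/style-neutral factor value.
    fit = sm.OLS(y.values, X.values).fit()
    return pd.Series(y.values - fit.predict(X.values), index=merged.index)

# Illustrative usage with dummy-coded industries (hypothetical tickers).
factor = pd.Series({'000001': 1.2, '000002': -0.4, '600000': 0.3})
industry = pd.Series({'000001': 'bank', '000002': 'bank', '600000': 'tech'})
neutral = neutralize(factor, pd.get_dummies(industry).astype(float))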
# Fix a method's vtable calls + reference making

#@author simo
#@category iOS.kernel
#@keybinding R
#@toolbar logos/refs.png
#@description Resolve references for better CFG
# -*- coding: utf-8 -*-

"""
Script that does the following:
- Adds references to virtual method calls
- Identifies methods that belong to a specific namespace
- Handles multi-value vtable references (multiple nodes)
"""

from utils.references import *

if __name__ == "__main__":
    fix_extra_refs(currentAddress)
normal
{ "blob_id": "30a57197e3156023ac9a7c4a5218bfe825e143d9", "index": 5978, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n fix_extra_refs(currentAddress)\n", "step-3": "<mask token>\nfrom utils.references import *\nif __name__ == '__main__':\n fix_extra_refs(currentAddress)\n", "step-4": "# Fix a method's vtable calls + reference making\n\n#@author simo\n#@category iOS.kernel\n#@keybinding R\n#@toolbar logos/refs.png\n#@description Resolve references for better CFG\n# -*- coding: utf-8 -*-\n\n\"\"\"\nscript which does the following:\n- adds references to virtual method calls\n- Identifies methods belong to a specific namespace\n- Handles multi value vtable reference (multi-nodes)\n\"\"\"\n\nfrom utils.references import *\n\nif __name__ == \"__main__\":\n fix_extra_refs(currentAddress)\n \n \n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
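The Ghidra record above delegates the actual work to `fix_extra_refs` from the author's private `utils.references` package, whose implementation is not included in the record. A rough, hypothetical sketch of the mechanism it alludes to — attaching a call reference to an indirect (vtable) call site through Ghidra's reference manager — is shown below; the helper name, the placeholder target address, and the assumption that the target was resolved elsewhere are all illustrative, not taken from the original script.

# Hypothetical sketch, not the contents of utils.references.
from ghidra.program.model.symbol import RefType, SourceType

def add_vtable_call_ref(call_addr, target_addr, op_index=0):
    # Assumes call_addr holds an indirect call whose concrete target was
    # resolved elsewhere (e.g. by following the vtable pointer).
    ref_mgr = currentProgram.getReferenceManager()
    ref_mgr.addMemoryReference(call_addr, target_addr,
                               RefType.COMPUTED_CALL,
                               SourceType.USER_DEFINED, op_index)

# Example: link the call under the cursor to a placeholder target address.
add_vtable_call_ref(currentAddress, toAddr(0x100004000))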
import warnings warnings.filterwarnings('ignore', category=FutureWarning) from cv2 import cv2 from tqdm import tqdm import os import pickle import numpy as np import csv import sys from collections import defaultdict from dataset_utils import * sys.path.append("../training") from dataset_tools import enclosing_square, add_margin, DataGenerator EXT_ROOT = os.path.dirname(os.path.abspath(__file__)) rafdb_labels = { "age_group": { "0-3": 0, "4-19": 1, "20-39": 2, "40-69": 3, "70+":4 }, "race": { "Caucasian": 0, "African-American": 1, "Asian": 2 } } # converted labels rafDBmeta = defaultdict(dict) # multitask labels rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose rafDBdata = None # dict({image_path: ... }) # for ensembling purpose # ORDER: Gender, Age, Ethnicity, Emotion def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False): global rafDBdata if rafDBdata is None: rafDBdata = dict() i, errors = 0, defaultdict(set) for image_path, image_meta in input_meta.items(): identity = image_meta["identity"] roi = None # aligned image, roi is the image size rafDBdata[image_path] = { "roi" : roi, "identity" : identity, "gender" : get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE, "age_group" : get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE, "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE, "emotion": get_emotion_label(image_meta["emotion"]), "sample_num" : i } i += 1 print("Metadata:", len(rafDBdata)) if errors: print("Gender errors", errors["gender"]) print("Age errors", errors["age"]) print("Ethnicity errors", errors["ethnicity"]) # Labelling def get_gender_label(gender): if gender == 'male': return LABELS["gender"]["male"] elif gender == 'female': return LABELS["gender"]["female"] return MASK_VALUE def get_age_group_label(age_group_text): return rafdb_labels["age_group"][age_group_text] def get_ethnicity_label(ethnicity_text): return rafdb_labels["race"][ethnicity_text] def get_emotion_label(emotion): return LABELS["emotion"][emotion] # Load from csv def _load_meta_from_csv(csv_meta, output_dict): data = readcsv(csv_meta) for row in data: output_dict[row[0]]["gender"] = row[1] output_dict[row[0]]["age_group"] = row[2] output_dict[row[0]]["race"] = row[3] output_dict[row[0]]["emotion"] = row[4] output_dict[row[0]]["identity"] = row[0].split("_")[1] def get_partition(identity_label): global rafDBpartition try: faces, partition = rafDBpartition[identity_label] rafDBpartition[identity_label] = (faces + 1, partition) except KeyError: # split 20/80 stratified by identity l = (len(rafDBpartition) - 1) % 10 if l == 0 or l == 1: partition = PARTITION_VAL else: partition = PARTITION_TRAIN rafDBpartition[identity_label] = (1, partition) return partition def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None): data = list() discarded_items = defaultdict(list) for image_path, image_meta in tqdm(rafDBdata.items()): path = os.path.join(imagesdir, image_path) if ALIGNED: path = os.path.splitext(path) path = path[0] + "_aligned" + path[1] identity = image_meta["identity"] image = cv2.imread(path) if image is None: print("WARNING! 
Unable to read {}".format(image_path)) print(" - At {}".format(path)) discarded_items["unavailable_image"].append(identity) continue if np.max(image) == np.min(image): print("Blank image {}".format(image_path)) discarded_items["blank_image"].append(identity) continue sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity) gender = rafDBdata[image_path]["gender"] age = rafDBdata[image_path]["age_group"] ethnicity = rafDBdata[image_path]["ethnicity"] emotion = rafDBdata[image_path]["emotion"] labels = (gender, age, ethnicity, emotion) roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"] sample = { 'img': path, 'label': labels, 'roi': roi, 'part': sample_partition } data.append(sample) if debug_max_num_samples is not None and len(data) >= debug_max_num_samples: print("Stopped loading. Debug max samples: ", debug_max_num_samples) break print("Data loaded. {} samples".format(len(data))) print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"])) print("Discarded for blank image: ", len(discarded_items["blank_image"])) return data ALIGNED = True class RAFDBMulti: def __init__(self, partition='train', imagesdir='data/RAF-DB/basic/Image/{aligned}', csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv', target_shape=(112, 112, 3), augment=True, custom_augmentation=None, preprocessing='full_normalization', debug_max_num_samples=None, include_gender=False, include_age_group=False, include_race=False, **kwargs): partition_label = partition_select(partition) self.target_shape = target_shape self.custom_augmentation = custom_augmentation self.augment = augment self.gen = None self.preprocessing = preprocessing print('Loading %s data...' % partition) num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else '' cache_task = "{}{}{}_emotion".format( "_withgender" if include_gender else "", "_withagegroup" if include_age_group else "", "_withrace" if include_race else "" ) cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples) cache_file_name = os.path.join("cache", cache_file_name) cache_file_name = os.path.join(EXT_ROOT, cache_file_name) print("cache file name %s" % cache_file_name) try: with open(cache_file_name, 'rb') as f: self.data = pickle.load(f)[:debug_max_num_samples] print("Data loaded. 
%d samples, from cache" % (len(self.data))) except FileNotFoundError: print("Loading %s data from scratch" % partition) load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test" imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original")) csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition)) _load_meta_from_csv(csvmeta, rafDBmeta) _load_traits(rafDBmeta, include_gender, include_age_group, include_race) print("Loading {} dataset".format(partition)) loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples) print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label) if partition.startswith('test'): self.data = loaded_data else: self.data = [x for x in loaded_data if x['part'] == partition_label] with open(cache_file_name, 'wb') as f: print("Pickle dumping") pickle.dump(self.data, f) def get_data(self): return self.data def get_num_samples(self): return len(self.data) def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False): if self.gen is None: self.gen = DataGenerator(data=self.data, target_shape=self.target_shape, with_augmentation=self.augment, custom_augmentation=self.custom_augmentation, batch_size=batch_size, num_classes=self.get_num_classes(), preprocessing=self.preprocessing, fullinfo=fullinfo, doublelabel=doublelabel) return self.gen def get_num_classes(self): return CLASSES def test_multi(dataset="test", debug_samples=None): if dataset.startswith("train") or dataset.startswith("val"): print(dataset, debug_samples if debug_samples is not None else '') dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dt.get_generator() else: dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing='vggface2', debug_max_num_samples=debug_samples) gen = dv.get_generator() i = 0 for batch in tqdm(gen): for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]): facemax = np.max(im) facemin = np.min(im) print("Sample:", i) print("Labels:", gender, age, ethnicity, emotion) print("Gender:", verbose_gender(gender), "- Age:", verbose_age(age), "- Ethnicity:", verbose_ethnicity(ethnicity), "- Emotion:", verbose_emotion(emotion)) im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8) cv2.putText(im, "{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255)) cv2.imshow("{} {} {} {}".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im) i += 1 if cv2.waitKey(0) & 0xFF == ord('q'): cv2.destroyAllWindows() return if '__main__' == __name__: test_multi("train") test_multi("val") test_multi("test")
normal
{ "blob_id": "0b7d1564ecbd78086d59629a2058716f41b4b8c8", "index": 9686, "step-1": "<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! 
Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n 
get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n 
verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n", "step-4": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\nfrom dataset_utils import *\nsys.path.append('../training')\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\nrafdb_labels = {'age_group': {'0-3': 0, '4-19': 1, '20-39': 2, '40-69': 3,\n '70+': 4}, 'race': {'Caucasian': 0, 'African-American': 1, 'Asian': 2}}\nrafDBmeta = defaultdict(dict)\nrafDBpartition = dict()\nrafDBdata = None\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels['race'][ethnicity_text]\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\ndef get_partition(identity_label):\n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = faces + 1, partition\n except KeyError:\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = 1, partition\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! 
Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\nALIGNED = True\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi('train')\n test_multi('val')\n test_multi('test')\n", "step-5": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\n\nfrom dataset_utils import *\n\nsys.path.append(\"../training\")\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\n\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nrafdb_labels = 
{\n \"age_group\": {\n \"0-3\": 0,\n \"4-19\": 1,\n \"20-39\": 2,\n \"40-69\": 3,\n \"70+\":4 \n },\n \"race\": {\n \"Caucasian\": 0,\n \"African-American\": 1,\n \"Asian\": 2\n }\n}\n\n# converted labels\nrafDBmeta = defaultdict(dict)\n\n# multitask labels\nrafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose\nrafDBdata = None # dict({image_path: ... }) # for ensembling purpose\n\n\n# ORDER: Gender, Age, Ethnicity, Emotion\ndef _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta[\"identity\"]\n roi = None # aligned image, roi is the image size\n rafDBdata[image_path] = {\n \"roi\" : roi,\n \"identity\" : identity,\n \"gender\" : get_gender_label(image_meta[\"gender\"]) if include_gender else MASK_VALUE,\n \"age_group\" : get_age_group_label(image_meta[\"age_group\"]) if include_age_group else MASK_VALUE,\n \"ethnicity\": get_ethnicity_label(image_meta[\"race\"]) if include_race else MASK_VALUE,\n \"emotion\": get_emotion_label(image_meta[\"emotion\"]),\n \"sample_num\" : i\n }\n i += 1 \n print(\"Metadata:\", len(rafDBdata))\n if errors:\n print(\"Gender errors\", errors[\"gender\"])\n print(\"Age errors\", errors[\"age\"])\n print(\"Ethnicity errors\", errors[\"ethnicity\"])\n\n\n# Labelling\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS[\"gender\"][\"male\"]\n elif gender == 'female':\n return LABELS[\"gender\"][\"female\"]\n return MASK_VALUE\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels[\"age_group\"][age_group_text]\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels[\"race\"][ethnicity_text]\n\ndef get_emotion_label(emotion):\n return LABELS[\"emotion\"][emotion]\n\n\n# Load from csv\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]][\"gender\"] = row[1]\n output_dict[row[0]][\"age_group\"] = row[2]\n output_dict[row[0]][\"race\"] = row[3]\n output_dict[row[0]][\"emotion\"] = row[4]\n output_dict[row[0]][\"identity\"] = row[0].split(\"_\")[1]\n\n\ndef get_partition(identity_label): \n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = (faces + 1, partition)\n except KeyError:\n # split 20/80 stratified by identity\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = (1, partition)\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + \"_aligned\" + path[1]\n identity = image_meta[\"identity\"]\n image = cv2.imread(path)\n if image is None:\n print(\"WARNING! 
Unable to read {}\".format(image_path))\n print(\" - At {}\".format(path))\n discarded_items[\"unavailable_image\"].append(identity)\n continue\n if np.max(image) == np.min(image):\n print(\"Blank image {}\".format(image_path))\n discarded_items[\"blank_image\"].append(identity)\n continue\n sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)\n gender = rafDBdata[image_path][\"gender\"]\n age = rafDBdata[image_path][\"age_group\"]\n ethnicity = rafDBdata[image_path][\"ethnicity\"]\n emotion = rafDBdata[image_path][\"emotion\"]\n labels = (gender, age, ethnicity, emotion)\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta[\"roi\"] is None else image_meta[\"roi\"] \n sample = {\n 'img': path,\n 'label': labels,\n 'roi': roi,\n 'part': sample_partition\n }\n data.append(sample)\n if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:\n print(\"Stopped loading. Debug max samples: \", debug_max_num_samples)\n break\n print(\"Data loaded. {} samples\".format(len(data)))\n print(\"Discarded for unavailable image: \", len(discarded_items[\"unavailable_image\"]))\n print(\"Discarded for blank image: \", len(discarded_items[\"blank_image\"]))\n return data\n\n\nALIGNED = True\n\nclass RAFDBMulti:\n def __init__(self,\n partition='train',\n imagesdir='data/RAF-DB/basic/Image/{aligned}',\n csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3),\n augment=True,\n custom_augmentation=None,\n preprocessing='full_normalization',\n debug_max_num_samples=None,\n include_gender=False,\n include_age_group=False,\n include_race=False,\n **kwargs):\n \n partition_label = partition_select(partition)\n\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n\n num_samples = \"_\" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''\n cache_task = \"{}{}{}_emotion\".format(\n \"_withgender\" if include_gender else \"\",\n \"_withagegroup\" if include_age_group else \"\",\n \"_withrace\" if include_race else \"\"\n )\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join(\"cache\", cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print(\"cache file name %s\" % cache_file_name)\n\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print(\"Data loaded. 
%d samples, from cache\" % (len(self.data)))\n except FileNotFoundError:\n print(\"Loading %s data from scratch\" % partition)\n load_partition = \"train\" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else \"test\"\n\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\"aligned\" if ALIGNED else \"original\"))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))\n\n _load_meta_from_csv(csvmeta, rafDBmeta)\n\n _load_traits(rafDBmeta, include_gender, include_age_group, include_race)\n \n print(\"Loading {} dataset\".format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)\n\n print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] == partition_label]\n with open(cache_file_name, 'wb') as f:\n print(\"Pickle dumping\")\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data,\n target_shape=self.target_shape,\n with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation,\n batch_size=batch_size,\n num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, \n fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset=\"test\", debug_samples=None):\n\n if dataset.startswith(\"train\") or dataset.startswith(\"val\"):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset,\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test',\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print(\"Sample:\", i)\n print(\"Labels:\", gender, age, ethnicity, emotion)\n print(\"Gender:\", verbose_gender(gender),\n \"- Age:\", verbose_age(age),\n \"- Ethnicity:\", verbose_ethnicity(ethnicity),\n \"- Emotion:\", verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8)\n cv2.putText(im, \"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)),\n (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow(\"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi(\"train\")\n test_multi(\"val\")\n test_multi(\"test\")\n", "step-ids": [ 9, 12, 13, 18, 19 ] }
[ 9, 12, 13, 18, 19 ]
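A short usage sketch for the RAFDBMulti loader defined in this record: it builds the train split and inspects one generated batch, whose labels arrive as four per-task arrays (gender, age group, ethnicity, emotion). The import path and the presence of the RAF-DB images/CSVs under data/RAF-DB/ are assumptions for illustration, not part of the record.

# Sketch only: assumes the record's module is importable as rafdb_multi and
# that the RAF-DB files are where the loader expects them.
from rafdb_multi import RAFDBMulti  # assumed module name

train_set = RAFDBMulti('train',
                       target_shape=(112, 112, 3),
                       preprocessing='vggface2',
                       include_gender=True,
                       include_age_group=True,
                       include_race=True)
print('train samples:', train_set.get_num_samples())

gen = train_set.get_generator(batch_size=32)
images, (gender, age_group, ethnicity, emotion) = next(iter(gen))
print('image batch shape:', images.shape)
print('label batch shapes:', gender.shape, age_group.shape, ethnicity.shape, emotion.shape)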
import pymel.core as PM
import maya.cmds as cmds  # needed for the cmds.* calls in doJob()
import socket

def getShadingGroupMembership():
    '''
    Get a dictionary of shading group set information
    {'shadingGroup': [assignment1, assignment2...]}
    '''
    result = {}
    #sgs = PM.ls(sl= 1, et='shadingEngine')
    sgs = PM.listConnections(s= 1, t='shadingEngine')
    for sg in sgs:
        result[sg.name()] = sg.members(flatten=True)
    return result

def remoteMaye(msg):
    # Send the accumulated command string to the remote Maya command port socket.
    global maya
    maya.send(msg)

def vmtl_nameMap(name):
    # Map rig-namespace material names to their richPeopleSuck equivalents.
    whiteList = ['woman_Rig:surfaceShader1',
                 'lady_Rig:surfaceShader1',
                 'richman_rigging_master:richman_spot',
                 'oldman_Rig:surfaceShader1']
    if name == 'oldman_Rig:VRayMtl2':
        name = 'richPeopleSuck:oldman_cloth_vmtl'
    if name == 'oldman_Rig:VRayMtl3':
        name = 'richPeopleSuck:oldman_skin_vmtl'
    if name == 'oldman_Rig:VRayMtl4':
        name = 'richPeopleSuck:oldman_glass_vmtl'
    if name == 'lady_Rig:VRayMtl2':
        name = 'richPeopleSuck:lady_cloth_vmtl'
    if name == 'lady_Rig:VRayMtl1':
        name = 'richPeopleSuck:lady_skin_vmtl'
    if name == 'woman_Rig:VRayMtl1':
        name = 'richPeopleSuck:woman_cloth_vmtl'
    if name == 'woman_Rig:VRayMtl2':
        name = 'richPeopleSuck:woman_skin_vmtl'
    if name == 'richman_rigging_master:VRayMtl2':
        name = 'richPeopleSuck:richman_cloth_vmtl'
    if name == 'richman_rigging_master:VRayMtl1':
        name = 'richPeopleSuck:richman_skin_vmtl'
    if name == 'richman_rigging_master:surfaceShader3':
        name = 'richPeopleSuck:maneye_black_surface'
    if name in whiteList:
        name = 'richPeopleSuck:maneye_white_surface'

    return name


def doJob(port):
    # Connect to a Maya command port and reassign each shading group's
    # material (remapped through vmtl_nameMap) onto its members.
    host = "127.0.0.1"
    global maya
    maya = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    maya.connect( (host, port) )

    mtlDict = getShadingGroupMembership()

    for meshList in mtlDict.keys():
        vmtl = cmds.listConnections(meshList + '.surfaceShader', s= 1)[0]
        if mtlDict[meshList]:
            for mesh in mtlDict[meshList]:
                msg = ''
                target = ''
                if '.' in str(mesh):
                    # Per-face assignment: build one select/hyperShade pair per face.
                    faceList = []
                    faceStr = str(mesh).split('.f')[1].replace('[', '').replace(']', '')
                    if ',' in faceStr:
                        faceList = faceStr.split(',')
                    else:
                        faceList = [faceStr]
                    for face in faceList:
                        target = str(mesh).split('.')[0] + '.f[' + face + ']'
                        try:
                            msg += 'cmds.select("' + target + '", r= 1)\n'
                            msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'
                        except:
                            if len(target.split(':')) > 1:
                                target_1 = ':'.join(target.split(':')[0:2]) + ']'
                                target_2 = ':'.join([target.split(':')[0], target.split(':')[2]])
                                try:
                                    msg += 'cmds.select("' + target_1 + '", r= 1)\n'
                                    msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'
                                except:
                                    print('+++++++++++++++++++++++++++++++++++++\n+++++++++++++++++++++++++++++++++++++')
                else:
                    # Whole-object assignment.
                    target = str(mesh)
                    msg += 'cmds.select("' + target + '", r= 1)\n'
                    msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'

                remoteMaye(msg)

    maya.close()
normal
{ "blob_id": "4e38ad17ad66ac71b0df3cbcaa33cb546e96ce9d", "index": 2257, "step-1": "import pymel.core as PM\nimport socket\n\ndef getShadingGroupMembership():\n '''\n Get a dictionary of shading group set information\n {'shadingGroup': [assignmnet1, assignment2...]}\n '''\n result = {}\n #sgs = PM.ls(sl= 1, et='shadingEngine')\n sgs = PM.listConnections(s= 1, t='shadingEngine')\n for sg in sgs:\n result[sg.name()] = sg.members(flatten=True)\n return result\n\ndef remoteMaye(msg):\n global maya\n maya.send(msg)\n\ndef vmtl_nameMap(name):\n whiteList = ['woman_Rig:surfaceShader1',\n 'lady_Rig:surfaceShader1',\n 'richman_rigging_master:richman_spot',\n 'oldman_Rig:surfaceShader1']\n if name == 'oldman_Rig:VRayMtl2':\n name = 'richPeopleSuck:oldman_cloth_vmtl'\n if name == 'oldman_Rig:VRayMtl3':\n name = 'richPeopleSuck:oldman_skin_vmtl'\n if name == 'oldman_Rig:VRayMtl4':\n name = 'richPeopleSuck:oldman_glass_vmtl'\n if name == 'lady_Rig:VRayMtl2':\n name = 'richPeopleSuck:lady_cloth_vmtl'\n if name == 'lady_Rig:VRayMtl1':\n name = 'richPeopleSuck:lady_skin_vmtl'\n if name == 'woman_Rig:VRayMtl1':\n name = 'richPeopleSuck:woman_cloth_vmtl'\n if name == 'woman_Rig:VRayMtl2':\n name = 'richPeopleSuck:woman_skin_vmtl'\n if name == 'richman_rigging_master:VRayMtl2':\n name = 'richPeopleSuck:richman_cloth_vmtl'\n if name == 'richman_rigging_master:VRayMtl1':\n name = 'richPeopleSuck:richman_skin_vmtl'\n if name == 'richman_rigging_master:surfaceShader3':\n name = 'richPeopleSuck:maneye_black_surface'\n if name in whiteList:\n name = 'richPeopleSuck:maneye_white_surface'\n\n return name\n\n\ndef doJob(port):\n\n host = \"127.0.0.1\"\n global maya\n maya = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n maya.connect( (host, port) )\n\n mtlDict = getShadingGroupMembership()\n\n for meshList in mtlDict.keys():\n vmtl = cmds.listConnections(meshList + '.surfaceShader', s= 1)[0]\n if mtlDict[meshList]:\n for mesh in mtlDict[meshList]:\n msg = ''\n target = ''\n if '.' in str(mesh):\n faceList = []\n faceStr = str(mesh).split('.f')[1].replace('[', '').replace(']', '')\n if ',' in faceStr:\n faceList = faceStr.split(',')\n else:\n faceList = [faceStr]\n for face in faceList:\n target = str(mesh).split('.')[0] + '.f[' + face + ']'\n try:\n msg += 'cmds.select(\"' + target + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n except:\n if len(target.split(':')) > 1:\n target_1 = ':'.join(target.split(':')[0:2]) + ']'\n target_2 = ':'.join([target.split(':')[0], target.split(':')[2]])\n try:\n msg += 'cmds.select(\"' + target_1 + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n except:\n print '+++++++++++++++++++++++++++++++++++++\\n+++++++++++++++++++++++++++++++++++++'\n else:\n target = str(mesh)\n msg += 'cmds.select(\"' + target + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n\n remoteMaye(msg)\n\n maya.close()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
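The doJob routine in the record above only does something useful if a Maya session is listening on the target port and executes the cmds.* strings it receives. A minimal sketch of that receiving side follows; the port number (7002) is an assumption and just has to match the value passed to doJob, and the listening session is assumed to have maya.cmds imported as cmds in its main namespace so the sent statements resolve.

# Run inside the Maya session that should receive the commands (sketch only).
import maya.cmds as cmds

cmds.commandPort(name=':7002', sourceType='python')  # 7002 is an assumed choice

# Then, from the machine running the script above, drive it with:
#   doJob(7002)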
#!/home/nick/.virtualenvs/twitterbots/bin/python3.5 # -*- coding: utf-8 -*- import tweepy import sqlite3 from configparser import ConfigParser ''' A little OOP would be good later for authenticated user data, c, conn, api ''' def main(): Collector.collect() class Collector: # Main function def collect(): api = Collector.get_api() tweet_dump = Collector.all_tweet_db() c = tweet_dump[0] conn = tweet_dump[1] last_list = Collector.last_tweets(c, conn) # Look for new friends, add to db new_friends = Collector.new_f_check(api, c) Collector.download_to_limit(api, c, conn, new_friends) # Checks timelines of everyone in db already # adds anything new to db Collector.download_recent(api, c, conn, last_list) def get_api(): parser = ConfigParser() parser.read('twitter_auth.ini') consumer_key = parser.get('Keys', 'consumer_key').strip("'") consumer_secret = parser.get('Secrets', 'consumer_secret').strip("'") access_token = parser.get('Tokens', 'access_token').strip("'") access_token_secret = parser.get('Secrets', 'access_token_secret').strip("'") auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth, wait_on_rate_limit=True) return api # connects to tweet_dump.db creates tdump if not exists # tdump stores all tweets from anyone in list def all_tweet_db(): conn = sqlite3.connect('tweet_dump_main.db') c = conn.cursor() c.execute('''CREATE TABLE IF NOT EXISTS tdump (tweet TEXT, username TEXT, tweet_date TEXT, tweet_id TEXT, tweet_source TEXT, user_id TEXT)''') return c, conn # connects to tweet_dump.db creats served if not exists # served stores tweets that are mention authenticated user def mention_tweet_db(): conn = sqlite3.connect('tweet_dump_main.db') c = conn.cursor() c.execute('''CREATE TABLE IF NOT EXISTS mentioned (tweet TEXT, username TEXT, tweet_date TEXT, tweet_id TEXT, tweet_source TEXT, user_id TEXT)''') return c, conn # looks for new friends by comparing authenticated # user's friend list with list of friends in tdump def new_f_check(api, c): # get list of user's ids c.execute('SELECT user_id FROM tdump') users = c.fetchall() users = list(set([user[0] for user in users])) # get list of friends_ids from twitter friends_ids = api.friends_ids() new_friends = [x for x in friends_ids if str(x) not in users] return new_friends # downloads up to 3200 of a user's most # recent tweets commits to tdump def download_to_limit(api, c, conn, friend_list): # List of tweet ids already in db c.execute('SELECT tweet_id FROM tdump') tweet_ids = c.fetchall() tweet_ids = [e[0] for e in tweet_ids] new_tweets = [] for friend in friend_list: try: # try to get most recent 200 tweets from friend get_tweets = api.user_timeline(id=friend, count=200) except Exception as e: continue # add to list of all of this friend's tweets new_tweets.extend(get_tweets) # find oldest retrieved tweet's id number less 1 oldest = new_tweets[-1].id - 1 # get tweets until 3200 limit hit while len(get_tweets) > 0: try: # max_id arg looks for id's less than arg's value get_tweets = api.user_timeline(id=friend, count=200, max_id=oldest) except Exception as e: continue new_tweets.extend(get_tweets) oldest = new_tweets[-1].id - 1 if len(new_tweets) != 0: print('Insert Active') for tweet in new_tweets: c.execute('''INSERT INTO tdump (tweet, username, tweet_date, tweet_id, tweet_source, user_id) VALUES(?,?,?,?,?,?)''', [tweet.text, tweet.user.screen_name, tweet.created_at, tweet.id_str, tweet.source, tweet.user.id_str]) conn.commit() if len(new_tweets) != 0: 
print('Insert Done' + '\n') # simply check if tweet text contains my screen name # change from hard code later def mention_me(new_tweet_list, c, conn): mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]] if len(new_tweet_list) != 0: print('Insert Active') for tweet in mentioned: c.execute('''INSERT INTO served (tweet, username, tweet_date, tweet_id, tweet_source, user_id) VALUES(?,?,?,?,?,?)''', [tweet.text, tweet.user.screen_name, tweet.created_at, tweet.id_str, tweet.source, tweet.user.id_str]) conn.commit() if len(new_tweet_list) != 0: print('Insert Done' + '\n') # returns list of user_id and created_at pairs # date associated with user_id is date of last # tweet in database def last_tweets(c, conn): # list of user ids and the date of the # last tweet in db user_last_tweets = [] # get list of user's ids c.execute('SELECT user_id FROM tdump') users = c.fetchall() users = list(set([user[0] for user in users])) for user in users: c.execute('''SELECT user_id, tweet_id FROM tdump WHERE user_id = ? ORDER BY tweet_date DESC''', [user]) last_tweet = c.fetchone() user_last_tweets.append(last_tweet) return user_last_tweets # downloads most recent posts in each users timelines def download_recent(api, c, conn, last_tweets): c.execute('SELECT tweet_id FROM tdump') tweet_ids = [x[0] for x in c.fetchall()] new_tweets = [] for pair in last_tweets: user_id = pair[0] tweet_id = pair[1] try: get_tweets = api.user_timeline(id=user_id, since_id=tweet_id, count=200) except Exception: continue if len(get_tweets) != 0: # add to list of all of this friend's tweets new_tweets.extend(get_tweets) # find newest retrieved tweet's id number plus 1 newest = get_tweets[0].id + 1 while len(get_tweets) > 0: try: # max_id arg looks for id's less than arg's value get_tweets = api.user_timeline(id=user_id, count=200, since_id=newest) new_tweets.extend(get_tweets) newest = get_tweets[0].id + 1 except Exception: continue if len(new_tweets) != 0: print('Insert Active') for tweet in new_tweets: if tweet.user.screen_name != 'BonneNick' \ and tweet.id not in tweet_ids: c.execute('''INSERT INTO tdump (tweet, username, tweet_date, tweet_id, tweet_source, user_id) VALUES(?,?,?,?,?,?)''', [tweet.text, tweet.user.screen_name, tweet.created_at, tweet.id_str, tweet.source, tweet.user.id_str]) conn.commit() conn.close() if len(new_tweets) != 0: print('Insert Done' + '\n') if __name__ == '__main__': main()
normal
{ "blob_id": "372d8c8cb9ec8f579db8588aff7799c73c5af255", "index": 519, "step-1": "<mask token>\n\n\nclass Collector:\n <mask token>\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n <mask token>\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n <mask token>\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n <mask token>\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Collector:\n <mask token>\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n 
tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = 
Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return 
user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if 
len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/home/nick/.virtualenvs/twitterbots/bin/python3.5\n# -*- coding: utf-8 -*-\n\nimport tweepy\nimport sqlite3\n\nfrom configparser import ConfigParser\n\n'''\nA little OOP would be good later for\nauthenticated user data, c, conn, api\n'''\n\n\ndef main():\n\n Collector.collect()\n\n\nclass Collector:\n\n # Main function\n def collect():\n\n api = Collector.get_api()\n\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n\n # Look for new friends, add to db\n new_friends = Collector.new_f_check(api, c)\n\n Collector.download_to_limit(api, c, conn, new_friends)\n\n # Checks timelines of everyone in db already\n # adds anything new to db\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys',\n 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets',\n 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens',\n 
'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets',\n 'access_token_secret').strip(\"'\")\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n return api\n\n # connects to tweet_dump.db creates tdump if not exists\n # tdump stores all tweets from anyone in list\n def all_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # connects to tweet_dump.db creats served if not exists\n # served stores tweets that are mention authenticated user\n def mention_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # looks for new friends by comparing authenticated\n # user's friend list with list of friends in tdump\n def new_f_check(api, c):\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n # get list of friends_ids from twitter\n friends_ids = api.friends_ids()\n\n new_friends = [x for x in friends_ids if str(x) not in users]\n\n return new_friends\n\n # downloads up to 3200 of a user's most\n # recent tweets commits to tdump\n def download_to_limit(api, c, conn, friend_list):\n\n # List of tweet ids already in db\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n\n new_tweets = []\n\n for friend in friend_list:\n\n try:\n # try to get most recent 200 tweets from friend\n get_tweets = api.user_timeline(id=friend, count=200)\n\n except Exception as e:\n\n continue\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find oldest retrieved tweet's id number less 1\n oldest = new_tweets[-1].id - 1\n\n # get tweets until 3200 limit hit\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=friend,\n count=200,\n max_id=oldest)\n\n except Exception as e:\n\n continue\n\n new_tweets.extend(get_tweets)\n\n oldest = new_tweets[-1].id - 1\n\n if len(new_tweets) != 0:\n\n print('Insert Active')\n\n for tweet in new_tweets:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n # simply check if tweet text contains my screen name\n # change from hard code later\n def mention_me(new_tweet_list, c, conn):\n\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n\n if len(new_tweet_list) != 0:\n\n print('Insert Active')\n\n for tweet in mentioned:\n\n c.execute('''INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweet_list) != 0:\n\n print('Insert Done' + '\\n')\n\n # returns list of user_id and 
created_at pairs\n # date associated with user_id is date of last\n # tweet in database\n def last_tweets(c, conn):\n\n # list of user ids and the date of the\n # last tweet in db\n user_last_tweets = []\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n for user in users:\n\n c.execute('''SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC''',\n [user])\n\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n\n return user_last_tweets\n\n # downloads most recent posts in each users timelines\n def download_recent(api, c, conn, last_tweets):\n\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n\n new_tweets = []\n\n for pair in last_tweets:\n\n user_id = pair[0]\n tweet_id = pair[1]\n\n try:\n\n get_tweets = api.user_timeline(id=user_id,\n since_id=tweet_id,\n count=200)\n\n except Exception:\n\n continue\n\n if len(get_tweets) != 0:\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find newest retrieved tweet's id number plus 1\n newest = get_tweets[0].id + 1\n\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=user_id,\n count=200,\n since_id=newest)\n\n new_tweets.extend(get_tweets)\n\n newest = get_tweets[0].id + 1\n\n except Exception:\n\n continue\n\n if len(new_tweets) != 0:\n\n print('Insert Active')\n\n for tweet in new_tweets:\n\n if tweet.user.screen_name != 'BonneNick' \\\n and tweet.id not in tweet_ids:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n conn.close()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n\n main()\n", "step-ids": [ 5, 9, 11, 12, 14 ] }
[ 5, 9, 11, 12, 14 ]
"""
"""

import os

from alert_triage.util import filelock

MODIFIED_ALERTS_FILE = "/tmp/alert_triage_modified_alerts"

def read_modified_alert_ids():
    """ Read modified alert IDs from file, then remove them from the file."""
    # Return an empty list if the file doesn't exist.
    if not os.path.exists(MODIFIED_ALERTS_FILE):
        return []
    # Get a lock on the file
    lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)
    lock.acquire()
    # Open the file and read in the data.
    fp = open(MODIFIED_ALERTS_FILE, "r+")
    ids = fp.read().split("\n")
    # remove zero length strings
    ids = filter(len, ids)
    # convert IDs to int
    ids = list(map(int, ids))
    # remove duplicates
    ids = list(set(ids))
    # close and remove the file
    fp.close()
    #TODO: uncomment when live
    #os.unlink(MODIFIED_ALERTS_FILE)
    # Release the lock.
    lock.release()
    return ids

def write_modified_alert_ids(ids):
    # Get a lock on the file
    lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)
    lock.acquire()
    # Open the file and write the alert IDs.
    fp = open(MODIFIED_ALERTS_FILE, "a")
    for alert_id in ids:
        fp.write(str(alert_id) + "\n")
    fp.close()
    # Release the lock.
    lock.release()
normal
{ "blob_id": "90ae14d8af163343520365a5565a7c44de57059d", "index": 5662, "step-1": "<mask token>\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n", "step-3": "<mask token>\nMODIFIED_ALERTS_FILE = '/tmp/alert_triage_modified_alerts'\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n", "step-4": "<mask token>\nimport os\nfrom alert_triage.util import filelock\nMODIFIED_ALERTS_FILE = '/tmp/alert_triage_modified_alerts'\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n", "step-5": "\"\"\"\n\"\"\"\n\nimport os\n\nfrom alert_triage.util import filelock\n\nMODIFIED_ALERTS_FILE = \"/tmp/alert_triage_modified_alerts\"\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n # Return an empty list if the file doesn't exist.\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n # Get a lock on the file\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n # Open the file and read in the data.\n fp = open(MODIFIED_ALERTS_FILE, \"r+\")\n ids = fp.read().split(\"\\n\")\n # remove zero length strings\n ids = filter(len, ids)\n # convert IDs to int\n ids = list(map(int, ids))\n # remove duplicates\n ids = 
list(set(ids))\n # close and remove the file\n fp.close()\n #TODO: uncomment when live\n #os.unlink(MODIFIED_ALERTS_FILE)\n # Release the lock.\n lock.release()\n return ids\n\ndef write_modified_alert_ids(ids):\n # Get a lock on the file\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n # Open the file and write the alert IDs.\n fp = open(MODIFIED_ALERTS_FILE, \"a\")\n for alert_id in ids:\n fp.write(str(alert_id) + \"\\n\")\n fp.close()\n # Release the lock.\n lock.release()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import os
from typing import Union, Tuple, List

import pandas as pd

from flags import FLAGS
from helpers import load_from_pickle, decode_class, sort_results_by_metric

ROOT = FLAGS.ROOT
RESULTS_FOLDER = FLAGS.RESULTS_FOLDER

FULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, "checkpoints")


def eval_results(time_stamps: Union[Tuple, List],
                 excel_file_path=os.path.join(FULL_PATH_TO_CHECKPOINTS, f"xVal_results.xlsx")):
    with pd.ExcelWriter(excel_file_path, mode="w") as writer:
        for ts in time_stamps:
            print(f"Evaluating results for time stamp: {ts}")
            full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS, f"full_result_dict_{ts}.p")

            full_results_dict = load_from_pickle(full_results_dict_path)

            for run_id, results_dict in full_results_dict.items():
                only_eval_dict = {cur_xval: [decode_class(data[3]) for data in data_list]
                                  for cur_xval, data_list in results_dict.items()}
                # convert to pandas dataframe
                df = pd.DataFrame(only_eval_dict)
                df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS, f"xVal_results_{run_id}.csv"), index=False, header=False)
                df.to_excel(writer, run_id)


if __name__ == '__main__':
    time_stamps_to_eval = ["1616007514.9154973"]
    eval_results(time_stamps_to_eval)

    metric = "f1score"

    score_path_list, _ = sort_results_by_metric(os.path.join(ROOT, RESULTS_FOLDER, "checkpoints"), metric)

    print(f"{metric}: {[s for s, p in score_path_list]}")
normal
{ "blob_id": "5447bd3b08c22913ae50ee66ee81554d2357ef3e", "index": 3991, "step-1": "<mask token>\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n", "step-3": "<mask token>\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = FLAGS.RESULTS_FOLDER\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, 'checkpoints')\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n", "step-4": "import os\nfrom typing import Union, Tuple, List\nimport pandas as pd\nfrom flags import FLAGS\nfrom helpers import load_from_pickle, decode_class, sort_results_by_metric\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = 
FLAGS.RESULTS_FOLDER\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, 'checkpoints')\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n", "step-5": "import os\nfrom typing import Union, Tuple, List\n\nimport pandas as pd\n\nfrom flags import FLAGS\nfrom helpers import load_from_pickle, decode_class, sort_results_by_metric\n\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = FLAGS.RESULTS_FOLDER\n\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, \"checkpoints\")\n\n\ndef eval_results(time_stamps: Union[Tuple, List],\n excel_file_path=os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"xVal_results.xlsx\")):\n with pd.ExcelWriter(excel_file_path, mode=\"w\") as writer:\n for ts in time_stamps:\n print(f\"Evaluating results for time stamp: {ts}\")\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"full_result_dict_{ts}.p\")\n\n full_results_dict = load_from_pickle(full_results_dict_path)\n\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in data_list]\n for cur_xval, data_list in results_dict.items()}\n # convert to pandas dataframe\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"xVal_results_{run_id}.csv\"), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = [\"1616007514.9154973\"]\n eval_results(time_stamps_to_eval)\n\n metric = \"f1score\"\n\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT, RESULTS_FOLDER, \"checkpoints\"), metric)\n\n print(f\"{metric}: {[s for s, p in score_path_list]}\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import openslide

class QualityPatch():
    def __init__(self, original_img_path,label_img_path,patch_level,patch_size):
        """
        parameter:
            original_img_path(str): the source of image
            label_img_path(str): label image
            patch_level(int): the level that the patch belongs to
            patch_size(tuple): size of patch(x,y)

        attributes:
            self.slide(Openslide): the slide that the patch belongs to
            self.original_img_path(str) : the path of the lide
            self.label_img_path(str) : label_img_path
            self.patch_level(int) : the level that the patch belongs to
            self.patch_size = patch_size

            self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline
            self.label(np array) : the image of label
            self.label_size(tuple) : the size of label
            self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1
        """
        self.slide = openslide.OpenSlide(original_img_path)
        slide_width, slide_height = self.slide.dimensions
        self.label = (cv2.imread(label_img_path,cv2.IMREAD_GRAYSCALE)/255)
        self.patch_coors = [(w,h) for w in range(0, slide_width - patch_size[0], patch_size[0]) for h in range(0, slide_height - patch_size[1],patch_size[1])]

        self.original_img_path = original_img_path
        self.label_img_path = label_img_path
        self.patch_level = patch_level
        self.patch_size = patch_size
        self.label = self.label.T
        self.level_dim = self.slide.level_dimensions[patch_level]

        self.label_size = self.label.shape
        self.scale = (self.label_size[0]/self.level_dim[0], self.label_size[1]/self.level_dim[1])
        self.adj_patch_size_label = self.calculateAdjPatchSize()

    def calculateLabelCoordinates(self, patch_location):
        return (int(self.scale[0]*patch_location[0]/2**(self.patch_level)), int(self.scale[1]*patch_location[1]/2**(self.patch_level)))

    def calculateAdjPatchSize(self):
        return (int(self.scale[0] * self.patch_size[0])+1, int(self.scale[1] * self.patch_size[1])+1)

    def patchQualityInsurance(self, patch_location):
        label_coordinates = self.calculateLabelCoordinates(patch_location)
        percent = (np.sum(self.label[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]]))/(self.adj_patch_size_label[0]*self.adj_patch_size_label[1])

        return percent

    def getLabelWithPatchLocation(self, patch_location):
        patch_image = np.ones(self.adj_patch_size_label)/2
        label_with_patch_location = self.label.copy()
        label_coordinates = self.calculateLabelCoordinates(patch_location)
        label_with_patch_location[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]] = patch_image
        return label_with_patch_location.T

    def getReleventPatches(self):
        relevent_patches = []

        for i, coor in enumerate(self.patch_coors):
            percent = self.patchQualityInsurance(coor)
            if percent > .5:
                relevent_patches.append([coor,percent])
            if i % 10000 == 0:
                print(i, "/",len(self.patch_coors), "dic len", len(relevent_patches), " from", len(self.patch_coors) )
        return relevent_patches

    def checkingfunction(self, checking_coors=(40000,90000)):
        if checking_coors[0] < 0 or checking_coors[0] < 0 or\
            self.slide.level_dimensions[self.patch_level][0] < (checking_coors[0] / 2**(self.patch_level) + self.patch_size[0]) or\
            self.slide.level_dimensions[self.patch_level][1] < ((checking_coors[1] / 2**(self.patch_level) + self.patch_size[1])):
            raise ValueError("the patch location with patch size is not valid.")

        image = self.slide.read_region(checking_coors, self.patch_level, self.patch_size)
        percent = self.patchQualityInsurance(checking_coors)

        fig, ax = plt.subplots(nrows=1, ncols=3)
        plt.tight_layout()
        ax[0].set_title("tissue percentage %.02f"%percent)
        ax[0].axis('off')
        ax[0].imshow(image)
        ax[1].set_title("tissue label")
        ax[1].axis('off')
        ax[1].imshow(self.label.T, cmap='gray')
        ax[2].set_title("label with patch")
        ax[2].axis('off')
        ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))
        plt.savefig("test/check_read_region"+str(self.patch_level)+'.png')
        plt.close('all')
normal
{ "blob_id": "0ad71f02e37f2744036b134c33e037a724fd38a6", "index": 8049, "step-1": "<mask token>\n\n\nclass QualityPatch:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n", "step-2": "<mask token>\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n <mask token>\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = 
self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n", "step-3": "<mask token>\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return int(self.scale[0] * patch_location[0] / 2 ** 
self.patch_level\n ), int(self.scale[1] * patch_location[1] / 2 ** self.patch_level)\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport openslide\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label 
= self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return int(self.scale[0] * patch_location[0] / 2 ** self.patch_level\n ), int(self.scale[1] * patch_location[1] / 2 ** self.patch_level)\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n\n def getLabelWithPatchLocation(self, patch_location):\n patch_image = np.ones(self.adj_patch_size_label) / 2\n label_with_patch_location = self.label.copy()\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n label_with_patch_location[label_coordinates[0]:label_coordinates[0] +\n self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]] = patch_image\n return label_with_patch_location.T\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n", "step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport openslide\n\nclass QualityPatch():\n def __init__(self, original_img_path,label_img_path,patch_level,patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = 
patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = (cv2.imread(label_img_path,cv2.IMREAD_GRAYSCALE)/255)\n self.patch_coors = [(w,h) for w in range(0, slide_width - patch_size[0], patch_size[0]) for h in range(0, slide_height - patch_size[1],patch_size[1])]\n\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n\n self.label_size = self.label.shape\n self.scale = (self.label_size[0]/self.level_dim[0], self.label_size[1]/self.level_dim[1])\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return (int(self.scale[0]*patch_location[0]/2**(self.patch_level)), int(self.scale[1]*patch_location[1]/2**(self.patch_level)))\n \n def calculateAdjPatchSize(self):\n return (int(self.scale[0] * self.patch_size[0])+1, int(self.scale[1] * self.patch_size[1])+1)\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = (np.sum(self.label[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]]))/(self.adj_patch_size_label[0]*self.adj_patch_size_label[1])\n\n return percent\n\n def getLabelWithPatchLocation(self, patch_location):\n patch_image = np.ones(self.adj_patch_size_label)/2\n label_with_patch_location = self.label.copy()\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n label_with_patch_location[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]] = patch_image\n return label_with_patch_location.T\n \n def getReleventPatches(self):\n relevent_patches = []\n\n\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > .5:\n relevent_patches.append([coor,percent])\n if i % 10000 == 0:\n print(i, \"/\",len(self.patch_coors), \"dic len\", len(relevent_patches), \" from\", len(self.patch_coors) )\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000,90000)):\n if checking_coors[0] < 0 or checking_coors[0] < 0 or\\\n self.slide.level_dimensions[self.patch_level][0] < (checking_coors[0] / 2**(self.patch_level) + self.patch_size[0]) or\\\n self.slide.level_dimensions[self.patch_level][1] < ((checking_coors[1] / 2**(self.patch_level) + self.patch_size[1])):\n raise ValueError(\"the patch location with patch size is not valid.\")\n \n image = self.slide.read_region(checking_coors, self.patch_level, self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title(\"tissue percentage %.02f\"%percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title(\"tissue label\")\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title(\"label with patch\")\n ax[2].axis('off')\n 
ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig(\"test/check_read_region\"+str(self.patch_level)+'.png')\n plt.close('all')\n", "step-ids": [ 3, 6, 7, 9, 10 ] }
[ 3, 6, 7, 9, 10 ]
#API End Points by Mitul
import urllib.error, urllib.request, urllib.parse
import json

target = 'http://py4e-data.dr-chuck.net/json?'
local = input('Enter location: ')
url = target + urllib.parse.urlencode({'address': local, 'key' : 42})

print('Retriving', url)
data = urllib.request.urlopen(url).read()
print('Retrived', len(data), 'characters')
js = json.loads(data)
print(json.dumps(js, indent = 4))
print('Place id', js['results'][0]['place_id'])


'''Output:
Enter location: >? UIUC
Retriving http://py4e-data.dr-chuck.net/json?address=UIUC&key=42
Retrived 1720 characters
{
    "results": [
        {
            "access_points": [],
            "address_components": [
                {
                    "long_name": "Champaign",
                    "short_name": "Champaign",
                    "types": [
                        "locality",
                        "political"
                    ]
                },
                {
                    "long_name": "Champaign County",
                    "short_name": "Champaign County",
                    "types": [
                        "administrative_area_level_2",
                        "political"
                    ]
                },
                {
                    "long_name": "Illinois",
                    "short_name": "IL",
                    "types": [
                        "administrative_area_level_1",
                        "political"
                    ]
                },
                {
                    "long_name": "United States",
                    "short_name": "US",
                    "types": [
                        "country",
                        "political"
                    ]
                }
            ],
            "formatted_address": "Champaign, IL, USA",
            "geometry": {
                "location": {
                    "lat": 40.1019523,
                    "lng": -88.2271615
                },
                "location_type": "GEOMETRIC_CENTER",
                "viewport": {
                    "northeast": {
                        "lat": 40.1033012802915,
                        "lng": -88.22581251970848
                    },
                    "southwest": {
                        "lat": 40.1006033197085,
                        "lng": -88.2285104802915
                    }
                }
            },
            "place_id": "ChIJ6VUmqSTXDIgR-iZoBCUFPKU",
            "plus_code": {
                "compound_code": "4Q2F+Q4 Champaign, Champaign City Township, IL, United States",
                "global_code": "86GH4Q2F+Q4"
            },
            "types": [
                "establishment",
                "point_of_interest",
                "university"
            ]
        }
    ],
    "status": "OK"
}
Place id ChIJ6VUmqSTXDIgR-iZoBCUFPKU
'''
normal
{ "blob_id": "d34159536e860719094a36cfc30ffb5fcae72a9a", "index": 296, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Retriving', url)\n<mask token>\nprint('Retrived', len(data), 'characters')\n<mask token>\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n", "step-3": "<mask token>\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key': 42})\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n", "step-4": "import urllib.error, urllib.request, urllib.parse\nimport json\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key': 42})\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n", "step-5": "#API End Points by Mitul\nimport urllib.error, urllib.request, urllib.parse\nimport json\n\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key' : 42})\n\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent = 4))\nprint('Place id', js['results'][0]['place_id'])\n\n\n'''Output:\nEnter location: >? UIUC\nRetriving http://py4e-data.dr-chuck.net/json?address=UIUC&key=42\nRetrived 1720 characters\n{\n \"results\": [\n {\n \"access_points\": [],\n \"address_components\": [\n {\n \"long_name\": \"Champaign\",\n \"short_name\": \"Champaign\",\n \"types\": [\n \"locality\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Champaign County\",\n \"short_name\": \"Champaign County\",\n \"types\": [\n \"administrative_area_level_2\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Illinois\",\n \"short_name\": \"IL\",\n \"types\": [\n \"administrative_area_level_1\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"United States\",\n \"short_name\": \"US\",\n \"types\": [\n \"country\",\n \"political\"\n ]\n }\n ],\n \"formatted_address\": \"Champaign, IL, USA\",\n \"geometry\": {\n \"location\": {\n \"lat\": 40.1019523,\n \"lng\": -88.2271615\n },\n \"location_type\": \"GEOMETRIC_CENTER\",\n \"viewport\": {\n \"northeast\": {\n \"lat\": 40.1033012802915,\n \"lng\": -88.22581251970848\n },\n \"southwest\": {\n \"lat\": 40.1006033197085,\n \"lng\": -88.2285104802915\n }\n }\n },\n \"place_id\": \"ChIJ6VUmqSTXDIgR-iZoBCUFPKU\",\n \"plus_code\": {\n \"compound_code\": \"4Q2F+Q4 Champaign, Champaign City Township, IL, United States\",\n \"global_code\": \"86GH4Q2F+Q4\"\n },\n \"types\": [\n \"establishment\",\n \"point_of_interest\",\n \"university\"\n ]\n }\n ],\n \"status\": \"OK\"\n}\nPlace id ChIJ6VUmqSTXDIgR-iZoBCUFPKU\n'''\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Generated by Django 2.2.14 on 2020-08-25 17:00

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0004_auto_20200825_1318'),
    ]

    operations = [
        migrations.RenameField(
            model_name='cv',
            old_name='additionalskills_text',
            new_name='additional_skills_text',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='additionalskills_title',
            new_name='additional_skills_title',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_date',
            new_name='work_experience_date',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_header',
            new_name='work_experience_header',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_text',
            new_name='work_experience_text',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_title',
            new_name='work_experience_title',
        ),
    ]
normal
{ "blob_id": "e296a5bea5465c2b84e37c7d83922adb01feab70", "index": 9828, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0004_auto_20200825_1318')]\n operations = [migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_text', new_name='additional_skills_text'),\n migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_title', new_name='additional_skills_title'),\n migrations.RenameField(model_name='cv', old_name=\n 'workexperience_date', new_name='work_experience_date'), migrations\n .RenameField(model_name='cv', old_name='workexperience_header',\n new_name='work_experience_header'), migrations.RenameField(\n model_name='cv', old_name='workexperience_text', new_name=\n 'work_experience_text'), migrations.RenameField(model_name='cv',\n old_name='workexperience_title', new_name='work_experience_title')]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0004_auto_20200825_1318')]\n operations = [migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_text', new_name='additional_skills_text'),\n migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_title', new_name='additional_skills_title'),\n migrations.RenameField(model_name='cv', old_name=\n 'workexperience_date', new_name='work_experience_date'), migrations\n .RenameField(model_name='cv', old_name='workexperience_header',\n new_name='work_experience_header'), migrations.RenameField(\n model_name='cv', old_name='workexperience_text', new_name=\n 'work_experience_text'), migrations.RenameField(model_name='cv',\n old_name='workexperience_title', new_name='work_experience_title')]\n", "step-5": "# Generated by Django 2.2.14 on 2020-08-25 17:00\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0004_auto_20200825_1318'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='cv',\n old_name='additionalskills_text',\n new_name='additional_skills_text',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='additionalskills_title',\n new_name='additional_skills_title',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_date',\n new_name='work_experience_date',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_header',\n new_name='work_experience_header',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_text',\n new_name='work_experience_text',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_title',\n new_name='work_experience_title',\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import csv

#ratings.csv must be in the same directory

skipped_header = False
with open("ratings.csv") as in_file:
    csvreader = csv.reader(in_file)
    #read each row of ratings.csv (userId,movieId,rating,timestamp)
    with open("ratings_train.csv", 'w') as train_out:
        with open("ratings_test.csv", 'w') as test_out:
            for row in csvreader:
                if not skipped_header:
                    skipped_header = True
                    continue
                elif int(row[0]) <= 146541:
                    train_out.write(",".join(row[:-1]))
                    train_out.write("\n")
                else: #rest of the data (16000 of them)
                    test_out.write(",".join(row[:-1]))
                    test_out.write("\n")
normal
{ "blob_id": "e48a6a84268a0fe64e90714bd32712665934fc39", "index": 2223, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n", "step-3": "<mask token>\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n", "step-4": "import csv\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n", "step-5": "import csv\r\n\r\n#ratings.csv must be in the same directory\r\n\r\nskipped_header = False\r\nwith open(\"ratings.csv\") as in_file:\r\n csvreader = csv.reader(in_file)\r\n\t#read each row of ratings.csv (userId,movieId,rating,timestamp)\r\n with open(\"ratings_train.csv\", 'w') as train_out:\r\n with open(\"ratings_test.csv\", 'w') as test_out:\r\n for row in csvreader:\r\n if not skipped_header:\r\n skipped_header = True\r\n continue\r\n elif int(row[0]) <= 146541:\r\n train_out.write(\",\".join(row[:-1]))\r\n train_out.write(\"\\n\")\r\n else: #rest of the data (16000 of them)\r\n test_out.write(\",\".join(row[:-1]))\r\n test_out.write(\"\\n\")\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# test_LeapYear.py
# By Alex Graalum
import unittest
import LeapYear

class test_leapyear(unittest.TestCase):
    def test_four(self):
        self.assertEqual(LeapYear.leapyear(2012), True)
    def test_hundred(self):
        self.assertEqual(LeapYear.leapyear(2100), False)
    def test_fourhundred(self):
        self.assertEqual(LeapYear.leapyear(2000), True)
    def test_normal(self):
        self.assertEqual(LeapYear.leapyear(2002), False)

if __name__ == '__main__':
    unittest.main()
normal
{ "blob_id": "29cae66fdca65020a82212e5eabbc61eb900e543", "index": 7720, "step-1": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n <mask token>\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "import unittest\nimport LeapYear\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# test_LeapYear.py\n# By Alex Graalum\nimport unittest\nimport LeapYear\n\nclass test_leapyear(unittest.TestCase):\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n \nif __name__ == '__main__':\n unittest.main()\n \n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import os

from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash

from helpers import apology, login_required, lookup, usd

# Configure application
app = Flask(__name__)

# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True

# Ensure responses aren't cached
@app.after_request
def after_request(response):
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    response.headers["Expires"] = 0
    response.headers["Pragma"] = "no-cache"
    return response

# Custom filter
app.jinja_env.filters["usd"] = usd

# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")

# Make sure API key is set
if not os.environ.get("API_KEY"):
    raise RuntimeError("API_KEY not set")


@app.route("/")
@login_required
def index():
    """Show portfolio of stocks"""
    rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
    cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
    cash_=cash[0]["cash"]
    #store all the data into a dict so its easier to pass in to html
    display=[]
    total_share=0
    for row in rows:
        symbol=str(row["symbol"])
        print(symbol)
        name=lookup(symbol)["name"]
        shares=int(row["amount"])
        price=float(lookup(symbol)["price"])
        total=float(shares) *price
        total_share+=total
        display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})
    total_money=total_share+cash[0]["cash"]
    return render_template("index.html",display=display,total_money=total_money,cash=cash_)


@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
    """Buy shares of stock"""
    if request.method == "POST":

        # Ensure symbol was submitted
        if not request.form.get("symbol"):
            return apology("must provide symbol", 400)

        # Ensure shares was submitted
        elif not request.form.get("shares"):
            return apology("must provide shares", 400)

        if not request.form.get("shares").isdigit():
            return apology("must be integer",400)

        elif int(request.form.get("shares"))<1 :
            return apology("must be positive integer", 400)

        elif lookup(request.form.get("symbol"))==None:
            return apology("Must be a valid symbol",400)

        #ensure money>price
        quote=lookup(request.form.get("symbol"))
        shares=request.form.get("shares")
        cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
        if cash[0]["cash"]<int(quote["price"])*int(shares):
            return apology("You can't affort this/these",400)

        #BUY, STORE DATA IN REPOSITORY AND RECORD
        #record this transaction
        db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))",session["user_id"],int(shares),quote["symbol"],float(quote["price"]))

        #deduct the cash
        total=int(quote["price"])*int(shares)
        db.execute("UPDATE users SET cash=cash- (?) WHERE id=?",total,session["user_id"])

        return redirect("/")

    else:
        return render_template("buy.html")


@app.route("/history")
@login_required
def history():
    """Show history of transactions"""
    rows=db.execute("SELECT * FROM record ORDER BY t1")
    return render_template("history.html",rows=rows)


@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in"""

    # Forget any user_id
    session.clear()

    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":

        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username", 403)

        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 403)

        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))

        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
            return apology("invalid username and/or password", 403)

        # Remember which user has logged in
        session["user_id"] = rows[0]["id"]

        # Redirect user to home page
        return redirect("/")

    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("login.html")


@app.route("/logout")
def logout():
    """Log user out"""

    # Forget any user_id
    session.clear()

    # Redirect user to login form
    return redirect("/")


@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
    """Get stock quote."""
    if request.method=="POST":
        quote=lookup(request.form.get("symbol"))
        if quote==None:
            return apology("Invalid symbol",400)
        price=usd(quote["price"])
        return render_template("quoted.html",quote=quote,price=price)
    else:
        return render_template("quote.html")


@app.route("/register", methods=["GET", "POST"])
def register():
    """Register user"""
    if request.method == "POST":

        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username", 400)

        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 400)

        # Ensure comfirm password was submitted
        elif not request.form.get("confirmation"):
            return apology("must comfirm password", 400)

        # Ensure password matches
        elif request.form.get("confirmation") != request.form.get("password"):
            return apology("Password not matches",400)

        # Ensure username is new(unique)
        rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
        if len(rows) != 0:
            return apology("username used", 400)

        db.execute("INSERT INTO users (username,hash) VALUES (?,?)",request.form.get("username"),generate_password_hash(request.form.get("password")))

        # Redirect user to home page
        return redirect("/")

    else:
        return render_template("register.html")


@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
    """Sell shares of stock"""
    if request.method=='POST':

        #parameter is not filled
        if not request.form.get("shares"):
            return apology("Please enter how much u want to sell",400)

        #check if shares(amount) that are going to be sell less than owner's share.
        sell=request.form.get("symbol")
        shares=request.form.get("shares")
        amount=db.execute("SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions",session["user_id"],sell)
        if amount[0]["amount"]<int(shares):
            return apology("You dont own that much shares",400)

        #record sell and add cash amount
        quote=lookup(sell)
        price=quote["price"]
        total=int(price)*int(shares)
        db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))",session["user_id"],(int(shares)*-1),quote["symbol"],price)
        db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])

        return redirect("/")

    else:
        rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
        return render_template("sell.html",rows=rows)


@app.route("/HAX", methods=["GET", "POST"])
@login_required
def HAX():
    #add free monei boiiii
    if request.method=="POST":
        total=request.form.get("HAX")
        db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])
        flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
        return redirect("/")
    else:
        return render_template("HAX.html")


def errorhandler(e):
    """Handle error"""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return apology(e.name, e.code)


# Listen for errors
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
normal
{ "blob_id": "c66f4ee5719f764c8c713c23815302c00b6fb9af", "index": 310, "step-1": "<mask token>\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<mask token>\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\n<mask token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) 
WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<mask token>\n", "step-3": "<mask token>\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "step-4": "import os\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom helpers import apology, login_required, lookup, usd\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "step-5": "import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required, lookup, usd\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n# Ensure responses aren't cached\[email protected]_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# Make sure API key is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n\[email protected](\"/\")\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n cash_=cash[0][\"cash\"]\n\n #store all the data into a dict so its easier to pass in to html\n display=[]\n total_share=0\n for row in rows:\n symbol=str(row[\"symbol\"])\n print(symbol)\n name=lookup(symbol)[\"name\"]\n shares=int(row[\"amount\"])\n price=float(lookup(symbol)[\"price\"])\n total=float(shares) *price\n total_share+=total\n display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})\n\n total_money=total_share+cash[0][\"cash\"]\n return render_template(\"index.html\",display=display,total_money=total_money,cash=cash_)\n\n\n\[email protected](\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n db.execute(\"UPDATE users SET cash=cash- (?) 
WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")\n\[email protected](\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\[email protected](\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\[email protected](\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method==\"POST\":\n quote=lookup(request.form.get(\"symbol\"))\n if quote==None:\n return apology(\"Invalid symbol\",400)\n price=usd(quote[\"price\"])\n return render_template(\"quoted.html\",quote=quote,price=price)\n else:\n return render_template(\"quote.html\")\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Ensure comfirm password was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must comfirm password\", 400)\n\n # Ensure password matches\n elif request.form.get(\"confirmation\") != request.form.get(\"password\"):\n return apology(\"Password not matches\",400)\n\n # Ensure username is new(unique)\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n if len(rows) != 0:\n return apology(\"username used\", 400)\n\n db.execute(\"INSERT INTO users (username,hash) VALUES (?,?)\",request.form.get(\"username\"),generate_password_hash(request.form.get(\"password\")))\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n\n else:\n return render_template(\"register.html\")\n\n\[email protected](\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than owner's share.\n 
sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)\n\n\n\[email protected](\"/HAX\", methods=[\"GET\", \"POST\"])\n@login_required\ndef HAX():\n #add free monei boiiii\n if request.method==\"POST\":\n total=request.form.get(\"HAX\")\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n\n return redirect(\"/\")\n\n else:\n return render_template(\"HAX.html\")\n\n\n\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "step-ids": [ 3, 9, 13, 14, 15 ] }
[ 3, 9, 13, 14, 15 ]
import os
import urllib.request
import zipfile
import tarfile

import matplotlib.pyplot as plt
from PIL import Image
import numpy as np

# Create the folder "data" if it does not exist
data_dir = "./data/"
if not os.path.exists(data_dir):
    os.mkdir(data_dir)

# Download and load MNIST
from sklearn.datasets import fetch_openml
mnist = fetch_openml("mnist_784", version = 1, data_home = "./data")

data_dir_path = "./data/img_78/"
if not os.path.exists(data_dir_path):
    os.mkdir(data_dir_path)

# Save only the digit-7 and digit-8 images from MNIST into the img_78 folder

count_7 = 0
count_8 = 0
N = 200 # create 200 images per digit

X = mnist.data
y = mnist.target


for i in range(len(X)):

    # generate image of 7
    if (y[i] == "7") and (count_7 < N):
        file_path = "./data/img_78/img_7_" + str(count_7) + ".jpg"
        im_f = (X[i].reshape(28, 28))
        pil_img_f = Image.fromarray(im_f.astype(np.uint8))  # convert to a PIL image
        pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC)  # upscale to 64x64
        pil_img_f.save(file_path)  # save
        count_7 += 1

    # generate image of 8
    if (y[i] == "8") and (count_8 < N):
        file_path = "./data/img_78/img_8_" + str(count_8) + ".jpg"
        im_f = (X[i].reshape(28, 28))  # reshape to 28x28
        pil_img_f = Image.fromarray(im_f.astype(np.uint8))  # convert to a PIL image
        pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC)  # upscale to 64x64
        pil_img_f.save(file_path)  # save
        count_8 += 1
normal
{ "blob_id": "6f53a989ddf179b699186a78b5d8cf6d3d08cbb2", "index": 4756, "step-1": "import os\nimport urllib.request\nimport zipfile\nimport tarfile\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom PIL import Image\nimport numpy as np\n\n# フォルダ「data」が存在しない場合は作成する\ndata_dir = \"./data/\"\nif not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n# MNIStをダウンロードして読み込む\nfrom sklearn.datasets import fetch_openml\nmnist = fetch_openml(\"mnist_784\", version = 1, data_home = \"./data\")\n\ndata_dir_path = \"./data/img_78/\"\nif not os.path.exists(data_dir_path):\n os.mkdir(data_dir_path)\n\n# MNIST1から数字の7, 8の画像だけフォルダ img_78に保存するよ\n\ncount_7 = 0\ncount_8 = 0\nN = 200 # 200枚ずつ作成\n\nX = mnist.data\ny = mnist.target\n\n\nfor i in range(len(X)):\n\n # generate image of 7\n if (y[i] is \"7\") and (count_7 < N):\n file_path = \"./data/img_78/img_7_\" + str(count_7) + \".jpg\"\n im_f = (X[i].reshape(28, 28))\n pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに\n pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大\n pil_img_f.save(file_path) # 保存\n count7+=1 \n \n # 画像8の作成\n if (y[i] is \"8\") and (count8<max_num):\n file_path=\"./data/img_78/img_8_\"+str(count_8)+\".jpg\"\n im_f=(X[i].reshape(28, 28)) # 画像を28*28の形に変形\n pil_img_f = Image.fromarray(im_f.astype(np.uint8)) # 画像をPILに\n pil_img_f = pil_img_f.resize((64, 64), Image.BICUBIC) # 64×64に拡大\n pil_img_f.save(file_path) # 保存\n count8+=1 ", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/python # -*- coding: UTF-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import unittest import logging from collections import Counter from utility import token_util class TestFileReadingFunctions(unittest.TestCase): def setUp(self): self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") self.one_word_per_line_path = os.path.join(self.data_dir, "one_word_per_line.txt") self.one_sent_per_line_path = os.path.join(self.data_dir, "one_sent_per_line.txt") self.token2id_path = os.path.join(self.data_dir, "token2id.txt") self.word_cnt_path_list = [self.one_sent_per_line_path, self.one_word_per_line_path] self.logger = logging.getLogger("ReadingFunctions Test Logger") def test_token_cnt(self): one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4}) one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6}) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=1, parallel_mode="size") self.assertEqual(c, one_word_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=3, parallel_mode="size") self.assertEqual(c, one_word_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size") self.assertEqual(c, one_sent_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size") self.assertEqual(c, one_sent_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size") self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size") self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="file") self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter) c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="file") self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter) def test_gen_token_id_from_file(self): one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4}) one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6}) res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=-1, separator=None) self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"]) res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=2, max_size=-1, separator=None) self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"]) res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=2, separator=None) self.assertEqual(res_list, ["f_6", "e_5"]) res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None) self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"]) res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=2, 
max_size=-1, separator=None) self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"]) res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None) self.assertEqual(res_list, ["f_6", "e_5"]) res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2, max_size=-1, separator=None) self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3", "e_5", "b_2"], delta=2) res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-1, max_size=3, separator=None) self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3"], delta=2) def test_load_token_id(self): token2id, id2token = token_util.load_token_id(self.token2id_path) self.assertEqual(token2id, {"a_0": 0, "b_1": 1, "c_2": 2, "d_3": 3, "UNK": 4}) self.assertEqual(id2token, ["a_0", "b_1", "c_2", "d_3", "UNK"]) if __name__ == "__main__": unittest.main()
normal
{ "blob_id": "7c3798aa9cc5424656572dfaa87f7acb961613eb", "index": 8715, "step-1": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n <mask token>\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 
'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 
1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n 
one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nimport os\nimport unittest\nimport logging\nfrom collections import Counter\nfrom utility import token_util\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n 
self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport unittest\nimport logging\nfrom collections import Counter\n\nfrom utility import token_util\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n self.one_word_per_line_path = os.path.join(self.data_dir, \"one_word_per_line.txt\")\n self.one_sent_per_line_path = os.path.join(self.data_dir, \"one_sent_per_line.txt\")\n self.token2id_path = os.path.join(self.data_dir, \"token2id.txt\")\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.one_word_per_line_path]\n\n self.logger = logging.getLogger(\"ReadingFunctions Test Logger\")\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4})\n one_sent_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4, \"e_5\": 5, \"f_6\": 6})\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], 
separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_sent_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"file\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"file\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4})\n one_sent_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4, \"e_5\": 5, \"f_6\": 6})\n\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\", \"b_2\", \"a_1\"])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\"])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\"])\n\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\", \"b_2\", \"a_1\"])\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\"])\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\"])\n\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, [\"d_4\", \"f_6\", \"c_3\", \"e_5\", \"b_2\"], delta=2)\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, [\"d_4\", \"f_6\", \"c_3\"], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {\"a_0\": 0, \"b_1\": 1, \"c_2\": 2, \"d_3\": 3, \"UNK\": 4})\n self.assertEqual(id2token, [\"a_0\", \"b_1\", \"c_2\", \"d_3\", \"UNK\"])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from collections import OrderedDict as odict from vent.gui import styles MONITOR = odict({ 'oxygen': { 'name': 'O2 Concentration', 'units': '%', 'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals' : 1 }, 'temperature': { 'name': 'Temperature', 'units': '\N{DEGREE SIGN}C', 'abs_range': (0, 50), 'safe_range': (20, 30), 'decimals': 1 }, 'humidity': { 'name': 'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75), 'decimals': 1 }, 'vte': { 'name': 'VTE', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 80), 'decimals': 1 } }) CONTROL = { 'oxygen': { 'name': 'O2 Concentration', 'units': '%', 'abs_range': (0, 100), 'value': 80, 'decimals': 1 }, 'temperature': { 'name': 'Temperature', 'units': '\N{DEGREE SIGN}C', 'abs_range': (0, 50), 'value': 23, 'decimals': 1 }, } PLOTS = { 'flow': { 'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range': (20, 80), 'color': styles.SUBWAY_COLORS['yellow'], }, 'pressure': { 'name': 'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80), 'color': styles.SUBWAY_COLORS['orange'], } }
normal
{ "blob_id": "941dac77fe60081ffa113c437a356d59837f5883", "index": 5304, "step-1": "<mask token>\n", "step-2": "<mask token>\nMONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',\n 'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},\n 'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0, \n 50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':\n 'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),\n 'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0, \n 100), 'safe_range': (20, 80), 'decimals': 1}})\nCONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':\n (0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':\n 'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,\n 'decimals': 1}}\nPLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':\n (20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':\n 'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange']}}\n", "step-3": "from collections import OrderedDict as odict\nfrom vent.gui import styles\nMONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',\n 'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},\n 'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0, \n 50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':\n 'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),\n 'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0, \n 100), 'safe_range': (20, 80), 'decimals': 1}})\nCONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':\n (0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':\n 'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,\n 'decimals': 1}}\nPLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':\n (20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':\n 'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange']}}\n", "step-4": "from collections import OrderedDict as odict\n\nfrom vent.gui import styles\n\nMONITOR = odict({\n 'oxygen': {\n 'name': 'O2 Concentration',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (60, 100),\n 'decimals' : 1\n },\n 'temperature': {\n 'name': 'Temperature',\n 'units': '\\N{DEGREE SIGN}C',\n 'abs_range': (0, 50),\n 'safe_range': (20, 30),\n 'decimals': 1\n },\n 'humidity': {\n 'name': 'Humidity',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (20, 75),\n 'decimals': 1\n },\n 'vte': {\n 'name': 'VTE',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'decimals': 1\n }\n })\n\n\nCONTROL = {\n 'oxygen': {\n 'name': 'O2 Concentration',\n 'units': '%',\n 'abs_range': (0, 100),\n 'value': 80,\n 'decimals': 1\n },\n 'temperature': {\n 'name': 'Temperature',\n 'units': '\\N{DEGREE SIGN}C',\n 'abs_range': (0, 50),\n 'value': 23,\n 'decimals': 1\n },\n }\n\nPLOTS = {\n 'flow': {\n 'name': 'Flow (L/s)',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['yellow'],\n },\n 'pressure': {\n 'name': 'Pressure (mmHg)',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange'],\n }\n }", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
### Script to convert matlab structure file (/motiongan/data/style-dataset/style_motion_database.mat') import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..')) import argparse import math import numpy as np from collections import OrderedDict import scipy.io import pickle from core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler ## Load motion data from .mat file def load_motion(mat_path, out): mat_data = scipy.io.loadmat(mat_path)['motion_database'] file_nums = mat_data.shape[1] motion_data_all = {} for f_id in range(file_nums): motion_data = {} # Get style and motion content motion_data['style'] = mat_data[0,f_id][0][0] motion_data['motion_type'] = mat_data[0,f_id][1][0] # Get file name full_path = mat_data[0,f_id][2][0,0][0][0] file_name = full_path.split('\\')[-1] # Get joint parameters frame_nums = mat_data[0,f_id][2].shape[1] root_pos = np.zeros((frame_nums,3)) joint_nums = mat_data[0,f_id][2][0,0][2].shape[0] motion_data['joint_nums'] = joint_nums joint_quarternions = np.zeros((frame_nums, joint_nums, 4)) for i in range(frame_nums): root_pos[i,:] = mat_data[0,f_id][2][0,i][1] joint_quarternions[i,:,:] = mat_data[0,f_id][2][0,i][2] motion_data['root_position'] = root_pos motion_data['joint_quarternions'] = joint_quarternions # Get foot contact annotation motion_data['foot_contact'] = mat_data[0,f_id][3][0] # Save file as pickle with open(os.path.join(out, os.path.splitext(file_name)[0]+'.pkl'), 'wb') as f: pickle.dump(motion_data, f) motion_data_all[file_name] = motion_data return motion_data_all ## Load skeleton data from .mat file def load_skeleton(mat_path): mat_data = scipy.io.loadmat(mat_path)['skel'][0,0] # Init skeleton skeleton = OrderedDict() bone_names = mat_data[1].tolist() for i, bone in enumerate(bone_names): bone = bone.strip() if bone == 'Site': bone = bone_names[i-1].strip() + bone skeleton[bone] = {'offset':[], 'parent':[], 'children':[]} # Resister bone parent and children, offset parent_ids = mat_data[2][0] offsets = mat_data[3] for i, bone in enumerate(skeleton.keys()): if bone != 'root': parent = list(skeleton.keys())[parent_ids[i]-1] skeleton[bone]['parent'] = parent skeleton[parent]['children'].append(bone) skeleton[bone]['offset'] = offsets[i,:] return skeleton ## Construct hierarchy of skeleton for bvh def construct_hierarchy(skeleton): hierarchy = ['HIERARCHY\r\n'] # Calc tree level level = 0 for i, bone in enumerate(skeleton.keys()): if bone == 'root': skeleton[bone]['level'] = 0 else: parent = skeleton[bone]['parent'] skeleton[bone]['level'] = skeleton[parent]['level'] + 1 # Write hierarchy for i, bone in enumerate(skeleton.keys()): offset = skeleton[bone]['offset'] if bone == 'root': hierarchy.append('ROOT root\r\n') hierarchy.append('{\r\n') hierarchy.append('\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2])) hierarchy.append('\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\r\n') elif bone.endswith('Site'): parent = skeleton[bone]['parent'] level = skeleton[bone]['level'] tabs = '\t' * level hierarchy.append(tabs + 'End Site\r\n') hierarchy.append(tabs + '{\r\n') hierarchy.append(tabs + '\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2])) hierarchy.append(tabs + '}\r\n') # Put end brancket if i == len(skeleton.keys())-1: while level > 0: level -= 1 hierarchy.append('\t' * level + '}\r\n') else: for _ in range(level - skeleton[list(skeleton.keys())[i+1]]['level']): level -= 1 hierarchy.append('\t' * level + 
'}\r\n') else: parent = skeleton[bone]['parent'] level = skeleton[bone]['level'] tabs = '\t'*level hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\r\n') hierarchy.append(tabs + '{\r\n') hierarchy.append(tabs + '\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2])) hierarchy.append(tabs + '\tCHANNELS 3 Zrotation Yrotation Xrotation\r\n') #with open('hierarchy_test.txt', 'w') as f: # f.writelines(hierarchy) return hierarchy # Write .bvh file def write_bvh(skeleton, hierarchy, motion_data_all, out): for file_name, motion_data in motion_data_all.items(): joint_quarternions = motion_data['joint_quarternions'] root_pos = motion_data['root_position'] # Convert data to list of string frames = [] for i in range(joint_quarternions.shape[0]): # Root pos root_pos_i = root_pos[i] frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist()) for j in range(joint_quarternions.shape[1]): # If Endsite, skip if list(skeleton.keys())[j].endswith('Site'): continue ## This implementation is modified to quarternion with 'xyzw' order R_ij = quaternion_to_rotation_mat(joint_quarternions[i,j,3], joint_quarternions[i,j,2], joint_quarternions[i,j,1], joint_quarternions[i,j,0]) euler_ij = rotation_mat_to_euler(R_ij) frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda s: s * (180.0/math.pi), euler_ij.tolist()))) frame += '\r\n' frames.append(frame) # Write with open(os.path.join(out, file_name), 'w') as f: f.writelines(hierarchy) f.write('MOTION\r\n') frames[0] = 'Frames: {0}\r\nFrame Time: 0.0083333\r\n'.format(joint_quarternions.shape[0]) + frames[0] f.writelines(frames) print(os.path.join(out, file_name)) def main(): parser = argparse.ArgumentParser() parser.add_argument('out', type=str) args = parser.parse_args() out = args.out motion_data_all = load_motion('../../motiongan/data/style-dataset/style_motion_database.mat', out) skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat') hierarchy = construct_hierarchy(skeleton) write_bvh(skeleton, hierarchy, motion_data_all, out) if __name__ == '__main__': main()
normal
{ "blob_id": "f2dac8b454805829cf5dbe2efe3c0de805ae4cb5", "index": 1727, "step-1": "<mask token>\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\n<mask token>\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n 
skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n 
'../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\n<mask token>\n", "step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 
'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport argparse\nimport math\nimport numpy as np\nfrom collections import OrderedDict\nimport scipy.io\nimport pickle\nfrom core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 
1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = 
construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "### Script to convert matlab structure file (/motiongan/data/style-dataset/style_motion_database.mat')\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport argparse\nimport math\nimport numpy as np\nfrom collections import OrderedDict\nimport scipy.io\nimport pickle\n\nfrom core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler\n\n## Load motion data from .mat file\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n # Get style and motion content \n motion_data['style'] = mat_data[0,f_id][0][0]\n motion_data['motion_type'] = mat_data[0,f_id][1][0] \n\n # Get file name\n full_path = mat_data[0,f_id][2][0,0][0][0]\n file_name = full_path.split('\\\\')[-1]\n\n # Get joint parameters\n frame_nums = mat_data[0,f_id][2].shape[1]\n root_pos = np.zeros((frame_nums,3))\n \n joint_nums = mat_data[0,f_id][2][0,0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i,:] = mat_data[0,f_id][2][0,i][1]\n joint_quarternions[i,:,:] = mat_data[0,f_id][2][0,i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n\n # Get foot contact annotation\n motion_data['foot_contact'] = mat_data[0,f_id][3][0]\n\n\n # Save file as pickle\n with open(os.path.join(out, os.path.splitext(file_name)[0]+'.pkl'), 'wb') as f:\n pickle.dump(motion_data, f)\n\n motion_data_all[file_name] = motion_data\n\n return motion_data_all\n\n\n## Load skeleton data from .mat file\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0,0]\n\n # Init skeleton\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i-1].strip() + bone\n skeleton[bone] = {'offset':[], 'parent':[], 'children':[]}\n \n # Resister bone parent and children, offset\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root': \n parent = list(skeleton.keys())[parent_ids[i]-1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n\n skeleton[bone]['offset'] = offsets[i,:]\n\n return skeleton\n\n\n## Construct hierarchy of skeleton for bvh\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n \n # Calc tree level\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n\n # Write hierarchy\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append('\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n')\n\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n 
hierarchy.append(tabs + '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n # Put end brancket\n if i == len(skeleton.keys())-1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else: \n for _ in range(level - skeleton[list(skeleton.keys())[i+1]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t'*level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs + '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append(tabs + '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n \n #with open('hierarchy_test.txt', 'w') as f:\n # f.writelines(hierarchy)\n return hierarchy\n\n\n# Write .bvh file\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n\n # Convert data to list of string\n frames = []\n for i in range(joint_quarternions.shape[0]):\n # Root pos\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist()) \n\n for j in range(joint_quarternions.shape[1]):\n # If Endsite, skip\n if list(skeleton.keys())[j].endswith('Site'): \n continue\n ## This implementation is modified to quarternion with 'xyzw' order\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i,j,3], joint_quarternions[i,j,2], joint_quarternions[i,j,1], joint_quarternions[i,j,0]) \n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda s: s * (180.0/math.pi), euler_ij.tolist())))\n\n frame += '\\r\\n'\n frames.append(frame)\n \n # Write\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n \n print(os.path.join(out, file_name))\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n\n args = parser.parse_args()\n out = args.out\n\n motion_data_all = load_motion('../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out) \n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
# -*- coding: utf-8 -*- """Very basic codec tests. :copyright: the translitcodec authors and developers, see AUTHORS. :license: MIT, see LICENSE for more details. """ import codecs import translitcodec data = u'£ ☹ wøóf méåw' def test_default(): assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw' def test_translit_long(): assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw' def test_translit_short(): assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw' def test_translit_one(): assert codecs.encode(data, 'translit/one') == u'\u00a3 \u2639 woof meaw' def test_translit_long_ascii(): data.encode('translit/long/ascii') == b'GBP :-( woof meaaw' def test_translit_short_ascii(): data.encode('translit/short/ascii') == b'GBP :-( woof meaw' def test_translit_one_ascii(): try: codecs.encode(data, 'translit/one/ascii') assert False except UnicodeEncodeError: assert True assert codecs.encode(data, 'translit/one/ascii', 'replace') == b'? ? woof meaw' def test_ascii_level_characters_remain(): assert codecs.encode(u"'", 'translit/long') == u"'" def test_zero_width_space(): try: char = codecs.encode(u'\u200b', 'translit/long') assert char == u'' except TypeError: assert False
normal
{ "blob_id": "426002bf900e23fd9b1d32c484350ac854228459", "index": 2565, "step-1": "<mask token>\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\n<mask token>\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\n<mask token>\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n", "step-2": "<mask token>\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n", "step-3": "<mask token>\n\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? 
woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n", "step-4": "<mask token>\ndata = u'£ ☹ wøóf méåw'\n\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"Very basic codec tests.\n\n:copyright: the translitcodec authors and developers, see AUTHORS.\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\nimport codecs\nimport translitcodec\n\n\ndata = u'£ ☹ wøóf méåw'\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'\\u00a3 \\u2639 woof meaw'\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n\n assert codecs.encode(data, 'translit/one/ascii', 'replace') == b'? ? woof meaw'\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n", "step-ids": [ 6, 8, 9, 10, 12 ] }
[ 6, 8, 9, 10, 12 ]
import re from xml.etree import ElementTree def get_namespace(xml_path): with open(xml_path) as f: namespaces = re.findall(r"xmlns:(.*?)=\"(.*?)\"", f.read()) return dict(namespaces) def get_comic_data(item, ns): return { "title": item.find("title").text, "post_date": item.find("pubDate").text, "path": "", "guid": item.find("guid").text, "alt_text": "", "tags": [child.text for child in item.findall("category")], "text": item.find("content:encoded", ns).text, } def get_comics(xml_path): ns = get_namespace(xml_path) tree = ElementTree.parse(xml_path) root = tree.getroot() assert root.tag == "rss" channel = root[0] assert channel.tag == "channel" version = channel.find("wp:wxr_version", ns).text assert version == "1.2" return [get_comic_data(item, ns) for item in channel.findall("item")]
normal
{ "blob_id": "86a15bb2e4d59fb5c8763fa2de31164beb327685", "index": 7928, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_namespace(xml_path):\n with open(xml_path) as f:\n namespaces = re.findall('xmlns:(.*?)=\\\\\"(.*?)\\\\\"', f.read())\n return dict(namespaces)\n\n\ndef get_comic_data(item, ns):\n return {'title': item.find('title').text, 'post_date': item.find(\n 'pubDate').text, 'path': '', 'guid': item.find('guid').text,\n 'alt_text': '', 'tags': [child.text for child in item.findall(\n 'category')], 'text': item.find('content:encoded', ns).text}\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_namespace(xml_path):\n with open(xml_path) as f:\n namespaces = re.findall('xmlns:(.*?)=\\\\\"(.*?)\\\\\"', f.read())\n return dict(namespaces)\n\n\ndef get_comic_data(item, ns):\n return {'title': item.find('title').text, 'post_date': item.find(\n 'pubDate').text, 'path': '', 'guid': item.find('guid').text,\n 'alt_text': '', 'tags': [child.text for child in item.findall(\n 'category')], 'text': item.find('content:encoded', ns).text}\n\n\ndef get_comics(xml_path):\n ns = get_namespace(xml_path)\n tree = ElementTree.parse(xml_path)\n root = tree.getroot()\n assert root.tag == 'rss'\n channel = root[0]\n assert channel.tag == 'channel'\n version = channel.find('wp:wxr_version', ns).text\n assert version == '1.2'\n return [get_comic_data(item, ns) for item in channel.findall('item')]\n", "step-4": "import re\nfrom xml.etree import ElementTree\n\n\ndef get_namespace(xml_path):\n with open(xml_path) as f:\n namespaces = re.findall('xmlns:(.*?)=\\\\\"(.*?)\\\\\"', f.read())\n return dict(namespaces)\n\n\ndef get_comic_data(item, ns):\n return {'title': item.find('title').text, 'post_date': item.find(\n 'pubDate').text, 'path': '', 'guid': item.find('guid').text,\n 'alt_text': '', 'tags': [child.text for child in item.findall(\n 'category')], 'text': item.find('content:encoded', ns).text}\n\n\ndef get_comics(xml_path):\n ns = get_namespace(xml_path)\n tree = ElementTree.parse(xml_path)\n root = tree.getroot()\n assert root.tag == 'rss'\n channel = root[0]\n assert channel.tag == 'channel'\n version = channel.find('wp:wxr_version', ns).text\n assert version == '1.2'\n return [get_comic_data(item, ns) for item in channel.findall('item')]\n", "step-5": "import re\nfrom xml.etree import ElementTree\n\n\ndef get_namespace(xml_path):\n with open(xml_path) as f:\n namespaces = re.findall(r\"xmlns:(.*?)=\\\"(.*?)\\\"\", f.read())\n return dict(namespaces)\n\n\ndef get_comic_data(item, ns):\n return {\n \"title\": item.find(\"title\").text,\n \"post_date\": item.find(\"pubDate\").text,\n \"path\": \"\",\n \"guid\": item.find(\"guid\").text,\n \"alt_text\": \"\",\n \"tags\": [child.text for child in item.findall(\"category\")],\n \"text\": item.find(\"content:encoded\", ns).text,\n }\n\n\ndef get_comics(xml_path):\n ns = get_namespace(xml_path)\n\n tree = ElementTree.parse(xml_path)\n root = tree.getroot()\n assert root.tag == \"rss\"\n channel = root[0]\n assert channel.tag == \"channel\"\n\n version = channel.find(\"wp:wxr_version\", ns).text\n assert version == \"1.2\"\n\n return [get_comic_data(item, ns) for item in channel.findall(\"item\")]\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
"""Vista de Autorizaciones (Clientes/Especialistas/Vendedores).""" from django.shortcuts import render from dashboard.json2table import convert from django.utils.translation import ugettext_lazy as _ from api.connection import api from login.utils.tools import role_admin_check from django.utils.decorators import method_decorator from django.contrib.auth.decorators import user_passes_test from dashboard.tools import capitalize as cap, ToolsBackend as Tools from dashboard.forms import AuthorizationClientFilter class Autorization: logo_content_header = "fa fa-key" vars_page = {} def generate_header(self, custom_title=None): if custom_title: title = "{} - ".format(_("authorizations")).title() + custom_title else: title = self.title_content_header header = {'icon': self.logo_content_header, 'title': title} return {**header, **self.vars_page} class AutorizationClient(Autorization): """ Manejo de autorizaciones de clientes, se listan los clientes, en orden de pendiente, aprobado y rechazado, segun fecha Para posterior aprovacion o rechazo """ @method_decorator(user_passes_test(role_admin_check())) def list(self, request): """ Listado de clientes por autorizar, se incluyen tambien clientes aprovados y rechazados """ obj_api = api() # actual_page = get_actual_page(request) token = request.session['token'] title_page = _('User - User Affiliation').title() filters = {} form_filters = AuthorizationClientFilter(request.GET) if form_filters.is_valid(): # Agregamos filtros de encontrarse alguno filters = form_filters.cleaned_data tools = Tools() filters['from_date'] = tools.date_format_to_db(date=filters['from_date']) filters['until_date'] = tools.date_format_to_db(date=filters['until_date']) filters = form_filters.cleaned_data if request.method == 'GET': if 'approve' in request.GET and request.GET['approve']: pk = request.GET['approve'] data = {"status":1} obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data) if 'rejected' in request.GET and request.GET['rejected']: pk = request.GET['rejected'] data = {"status":2} obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data) # Traer data para el listado data = obj_api.get(slug='authorizations/clients/', arg=filters, token=token) header_table = [("", "code_seller"), ("", "name"),( "", "document_type_name"), ( "", "document"),( "", ""), ("", ""), ( "", "document"), ( "", "approve"), ("", "rejected"), ( "", "date_join")] # Multiples header, una lista por cada nivel de la cabecera multi_header = [ [ (_("seller code"), {'rowspan': '2'}), (_('user'), {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan': '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2', 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan': '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'}), ], [ (_('name or Social reason'), {}), (_('type document'), {}), (_('document number'), {}), (_('description'), {}), (_('Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {}), ], ] approve_column = {'type': 'submit', 'data': {'name':'approve','key':'id', 'cls':'btn btn-success','text':cap(_('approve'))}} rejected_column = {'type': 'submit', 'data': {'name':'rejected','key':'id', 'cls':'btn btn-danger','text':cap(_('rejected'))}} custom_column = { "date_join": {'type': 'date', 'data': ('date_join',)}, "approve": {'type': 'if_eval', 'data': ('r["status"]=="0"',), 'next': approve_column}, "rejected": { 'type': 'if_eval', 'data': ('r["status"]=="0"',), 'next': rejected_column }, } table = convert(data, header=header_table, multi_header=multi_header, 
custom_column=custom_column) # Titulo de la vista y variables de la Clase vars_page = self.generate_header(custom_title=title_page) return render(request, 'admin/authorization/clients.html', {'table': table, 'vars_page': vars_page, 'form_filters':form_filters})
normal
{ "blob_id": "b78ad3a55eb27fd91f89c22db07fadca297640ab", "index": 2892, "step-1": "<mask token>\n\n\nclass Autorization:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n", "step-2": "<mask token>\n\n\nclass Autorization:\n <mask token>\n <mask token>\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los 
clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n", "step-3": "<mask token>\n\n\nclass Autorization:\n logo_content_header = 'fa fa-key'\n vars_page = {}\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se 
incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n", "step-4": "<mask token>\nfrom django.shortcuts import render\nfrom dashboard.json2table import convert\nfrom django.utils.translation import ugettext_lazy as _\nfrom api.connection import api\nfrom login.utils.tools import role_admin_check\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import user_passes_test\nfrom dashboard.tools import capitalize as cap, ToolsBackend as Tools\nfrom dashboard.forms import AuthorizationClientFilter\n\n\nclass Autorization:\n logo_content_header = 'fa fa-key'\n vars_page = {}\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y 
rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n", "step-5": "\"\"\"Vista de Autorizaciones (Clientes/Especialistas/Vendedores).\"\"\"\nfrom django.shortcuts import render\nfrom dashboard.json2table import convert\nfrom django.utils.translation import ugettext_lazy as _\nfrom api.connection import api\nfrom login.utils.tools import role_admin_check\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import user_passes_test\nfrom dashboard.tools import capitalize as cap, ToolsBackend as Tools\nfrom dashboard.forms import AuthorizationClientFilter\nclass Autorization:\n logo_content_header = \"fa fa-key\"\n vars_page = {}\n def generate_header(self, custom_title=None):\n if custom_title:\n title = \"{} - \".format(_(\"authorizations\")).title() + custom_title\n else:\n title = 
self.title_content_header\n\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n\n obj_api = api()\n # actual_page = get_actual_page(request)\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n\n form_filters = AuthorizationClientFilter(request.GET)\n\n if form_filters.is_valid(): # Agregamos filtros de encontrarse alguno\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters['from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters['until_date'])\n filters = form_filters.cleaned_data\n \n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {\"status\":1}\n obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)\n\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {\"status\":2}\n obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)\n\n # Traer data para el listado\n data = obj_api.get(slug='authorizations/clients/', arg=filters, token=token)\n\n\n header_table = [(\"\", \"code_seller\"), (\"\", \"name\"),(\n \"\", \"document_type_name\"), ( \"\", \"document\"),(\n \"\", \"\"), (\"\", \"\"), (\n \"\", \"document\"), (\n \"\", \"approve\"), (\"\", \"rejected\"), (\n \"\", \"date_join\")]\n\n # Multiples header, una lista por cada nivel de la cabecera\n multi_header = [\n [\n (_(\"seller code\"), {'rowspan': '2'}),\n (_('user'), {'rowspan': '1', 'colspan': '3'}),\n (_('product'), {'rowspan': '1', 'colspan': '2'}),\n (_('user code'), {'rowspan': '2', 'colspan': '1'}),\n (_('validation'), {'rowspan': '1', 'colspan': '2'}),\n (_('date'), {'rowspan': '2', 'colspan': '1'}),\n ],\n [\n (_('name or Social reason'), {}),\n (_('type document'), {}),\n (_('document number'), {}),\n (_('description'), {}),\n (_('Query Numbers'), {}),\n (_('approve'), {}),\n (_('deneis'), {}),\n ],\n ]\n\n approve_column = {'type': 'submit', 'data': {'name':'approve','key':'id',\n 'cls':'btn btn-success','text':cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name':'rejected','key':'id',\n 'cls':'btn btn-danger','text':cap(_('rejected'))}}\n custom_column = {\n \"date_join\": {'type': 'date', 'data': ('date_join',)},\n \"approve\": {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"',),\n 'next': approve_column},\n \"rejected\": {\n 'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',),\n 'next': rejected_column\n },\n }\n\n table = convert(data, header=header_table, multi_header=multi_header, custom_column=custom_column)\n\n # Titulo de la vista y variables de la Clase\n vars_page = self.generate_header(custom_title=title_page)\n\n return render(request, 'admin/authorization/clients.html',\n {'table': table, 'vars_page': vars_page, 'form_filters':form_filters})", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
#!/usr/bin/env python3 # -*- coding=utf-8 -*- # description: # author:jack # create_time: 2017/12/30 """ 卡片基类 """ import logging class BaseCard(object): def __init__(self, field=[]): self.data = {} self.support_set_field = field def add_cue_words(self, arr): """ 为卡片添加cue words 提示用户输入 :param arr: :return: """ if arr: if isinstance(arr, str): arr = [arr] if 'cueWords' in self.data: self.data['cueWords'] = self.data['cueWords'] else: self.data['cueWords'] = [] self.data['cueWords'].extend(arr) return self def set_anchor(self, url, anchor_text): """ 设置卡片链接 :param url: 比如:http(s)://.... :param anchor_text: 链接显示的文字 :return: """ if url: self.data['url'] = url if anchor_text: self.data['anchorText'] = anchor_text return self def get_data(self): return self.data def __getattr__(self, item): """ 添加魔术方法 :param item: :return: """ # 获取操作类型 set operation = item[0:3] # 获取被操作的属性 set_xxxx 获取xxxx field = item[4:] if operation == 'set' and field and (field.lower() in self.support_set_field): def function(*args): self.data[field.lower()] = args[0] return function else: def function(*args): logging.info("不支持 %s_%s" % (operation, field)) print('不支持', operation, field) return function if __name__ == '__main__': pass
normal
{ "blob_id": "93e5852df00733c024a59d37699bae58bd893030", "index": 112, "step-1": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n if 'cueWords' in self.data:\n self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\nif __name__ == '__main__':\n pass\n", "step-5": "#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\n# description:\n# author:jack\n# create_time: 2017/12/30\n\"\"\"\n卡片基类\n\"\"\"\nimport logging\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n\n if 'cueWords' in self.data:\n 
self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n # 获取操作类型 set\n operation = item[0:3]\n # 获取被操作的属性 set_xxxx 获取xxxx\n field = item[4:]\n if operation == 'set' and field and (field.lower() in self.support_set_field):\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n def function(*args):\n logging.info(\"不支持 %s_%s\" % (operation, field))\n print('不支持', operation, field)\n\n return function\n\n\nif __name__ == '__main__':\n pass\n", "step-ids": [ 2, 3, 4, 7, 9 ] }
[ 2, 3, 4, 7, 9 ]
import scrapy from scrapy.loader import ItemLoader class BlogSpider(scrapy.Spider): name = 'blogspider' start_urls = ['https://blog.scrapinghub.com'] def content_title_parser(self, mystr): return mystr[0].split(' ')[3] def parse(self, response): for url in response.css('ul li a::attr("href")').re('.*/category/.*'): yield scrapy.Request(response.urljoin(url), self.parse_titles) def parse_titles(self, response): l = ItemLoader(item=Posts(), response=response) l.add_css('content_title', 'h1.pagetitle::text', self. content_title_parser) l.add_css('post_title', 'div.entries > ul > li a::text') return l.load_item() class Posts(scrapy.Item): content_title = scrapy.Field() post_title = scrapy.Field()
normal
{ "blob_id": "4c79dcf394acbcc9a636bcc9b0aac13a2bafc7e3", "index": 9249, "step-1": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n <mask token>\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n", "step-2": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n <mask token>\n <mask token>\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n <mask token>\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n", "step-3": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n name = 'blogspider'\n start_urls = ['https://blog.scrapinghub.com']\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n\n def parse_titles(self, response):\n l = ItemLoader(item=Posts(), response=response)\n l.add_css('content_title', 'h1.pagetitle::text', self.\n content_title_parser)\n l.add_css('post_title', 'div.entries > ul > li a::text')\n return l.load_item()\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n", "step-4": "import scrapy\nfrom scrapy.loader import ItemLoader\n\n\nclass BlogSpider(scrapy.Spider):\n name = 'blogspider'\n start_urls = ['https://blog.scrapinghub.com']\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n\n def parse_titles(self, response):\n l = ItemLoader(item=Posts(), response=response)\n l.add_css('content_title', 'h1.pagetitle::text', self.\n content_title_parser)\n l.add_css('post_title', 'div.entries > ul > li a::text')\n return l.load_item()\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n", "step-5": null, "step-ids": [ 4, 5, 7, 8 ] }
[ 4, 5, 7, 8 ]
from HurdleRace import hurdleRace from ddt import ddt, data, unpack import unittest class test_AppendAndDelete3(unittest.TestCase): def test_hurdleRace(self): height = [1, 6, 3, 5, 2] k = 4 sum_too_high = hurdleRace(k, height) self.assertEqual(2, sum_too_high)
normal
{ "blob_id": "ea86a2a9068c316d3efcbcb165a8ef3d3516ba1b", "index": 4763, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n\n def test_hurdleRace(self):\n height = [1, 6, 3, 5, 2]\n k = 4\n sum_too_high = hurdleRace(k, height)\n self.assertEqual(2, sum_too_high)\n", "step-4": "from HurdleRace import hurdleRace\nfrom ddt import ddt, data, unpack\nimport unittest\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n\n def test_hurdleRace(self):\n height = [1, 6, 3, 5, 2]\n k = 4\n sum_too_high = hurdleRace(k, height)\n self.assertEqual(2, sum_too_high)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import inaccel.coral as inaccel import numpy as np import time class StereoBM: def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None ): # allocate mem for camera parameters for rectification and bm_state class with inaccel.allocator: if cameraMA_l is None: self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32) else: self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32) if cameraMA_r is None: self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32) else: self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32) if distC_l is None: self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0, 0.0], dtype=np.float32) else: self.distC_l_fl = np.array(distC_l, dtype=np.float32) if distC_r is None: self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0, 0.0], dtype=np.float32) else: self.distC_r_fl = np.array(distC_r, dtype=np.float32) if irA_l is None: self.irA_l_fl = np.array([0.0011976323, -0.0000000019, -0.8153011732, 0.0000000007, 0.0011976994, \ -0.4422348617, 0.0000126839, 0.0000001064, 0.9913820905], dtype=np.float32) else: self.irA_l_fl = np.array(irA_l, dtype=np.float32) if irA_r is None: self.irA_r_fl = np.array([0.0011976994, 0.0000000000, -0.8047567905, -0.0000000000, 0.0011976994, \ -0.4420566166, -0.0000000000, -0.0000001064, 1.0000392898], dtype=np.float32) else: self.irA_r_fl = np.array(irA_r, dtype=np.float32) if bm_state is None: self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15, 16, 3, 0], dtype=np.int32) else: self.bm_state_arr = np.array(bm_state, dtype=np.int32) def runAsync(self, left_img, right_img): self.m_runStartTime = int(round(time.time() * 1000000)) if left_img is None: raise RuntimeError('Invalid left image') if right_img is None: raise RuntimeError('Invalid right image') if left_img.shape[0] != right_img.shape[0] or left_img.shape[1] != right_img.shape[1]: raise RuntimeError('Image sizes differ') # allocate and initialize buffers rows = np.int32(left_img.shape[0]); cols = np.int32(left_img.shape[1]); with inaccel.allocator: self.left_mat = np.array(left_img) self.right_mat = np.array(right_img) self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16) # Create request for stereo accelerator req = inaccel.request('com.xilinx.vitis.vision.stereoBM') req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat) req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl) req.arg(self.distC_l_fl).arg(self.distC_r_fl) req.arg(self.irA_l_fl).arg(self.irA_r_fl) req.arg(self.bm_state_arr) req.arg(rows).arg(cols) self.response = inaccel.submit(req) def wait(self): # Send request and wait for completion self.response.result() # Write output image disp_mat_scaled = (self.disp_mat.view(np.ndarray)*(256.0 / 48.0) / (16.0)).astype(np.uint8) self.m_runEndTime = int(round(time.time() * 1000000)) return disp_mat_scaled; def run(self, left_img, right_img): self.runAsync(left_img, right_img) return self.wait() def lastruntime(self): duration = self.m_runEndTime - self.m_runStartTime return duration
normal
{ "blob_id": "66f3590381fe96c49a8926a806b4a845f0d7e25d", "index": 4681, "step-1": "<mask token>\n\n\nclass StereoBM:\n <mask token>\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n <mask token>\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n", "step-2": "<mask token>\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n 
req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n <mask token>\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n", "step-3": "<mask token>\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n\n def wait(self):\n self.response.result()\n disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) /\n 16.0).astype(np.uint8)\n self.m_runEndTime = int(round(time.time() * 1000000))\n return disp_mat_scaled\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - 
self.m_runStartTime\n return duration\n", "step-4": "import inaccel.coral as inaccel\nimport numpy as np\nimport time\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n\n def wait(self):\n self.response.result()\n disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) /\n 16.0).astype(np.uint8)\n self.m_runEndTime = int(round(time.time() * 1000000))\n return disp_mat_scaled\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n", "step-5": "import inaccel.coral as inaccel\nimport numpy as np\nimport time\n\nclass StereoBM:\n\tdef __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None ):\n\t\t# allocate mem for camera parameters for rectification and bm_state class\n\t\twith inaccel.allocator:\n\t\t\tif cameraMA_l is None:\n\t\t\t\tself.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, 933.173, 
377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n\n\t\t\tif cameraMA_r is None:\n\t\t\t\tself.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n\n\t\t\tif distC_l is None:\n\t\t\t\tself.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0, 0.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.distC_l_fl = np.array(distC_l, dtype=np.float32)\n\n\t\t\tif distC_r is None:\n\t\t\t\tself.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0, 0.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.distC_r_fl = np.array(distC_r, dtype=np.float32)\n\n\t\t\tif irA_l is None:\n\t\t\t\tself.irA_l_fl = np.array([0.0011976323, -0.0000000019, -0.8153011732, 0.0000000007, 0.0011976994, \\\n\t \t\t\t\t\t\t\t\t-0.4422348617, 0.0000126839, 0.0000001064, 0.9913820905], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.irA_l_fl = np.array(irA_l, dtype=np.float32)\n\n\t\t\tif irA_r is None:\n\t\t\t\tself.irA_r_fl = np.array([0.0011976994, 0.0000000000, -0.8047567905, -0.0000000000, 0.0011976994, \\\n\t -0.4420566166, -0.0000000000, -0.0000001064, 1.0000392898], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.irA_r_fl = np.array(irA_r, dtype=np.float32)\n\n\t\t\tif bm_state is None:\n\t\t\t\tself.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15, 16, 3, 0], dtype=np.int32)\n\t\t\telse:\n\t\t\t\tself.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n\n\tdef runAsync(self, left_img, right_img):\n\t\tself.m_runStartTime = int(round(time.time() * 1000000))\n\n\t\tif left_img is None:\n\t\t\traise RuntimeError('Invalid left image')\n\t\tif right_img is None:\n\t\t\traise RuntimeError('Invalid right image')\n\t\tif left_img.shape[0] != right_img.shape[0] or left_img.shape[1] != right_img.shape[1]:\n\t\t\traise RuntimeError('Image sizes differ')\n\n\t\t# allocate and initialize buffers\n\t\trows = np.int32(left_img.shape[0]);\n\t\tcols = np.int32(left_img.shape[1]);\n\n\t\twith inaccel.allocator:\n\t\t\tself.left_mat = np.array(left_img)\n\t\t\tself.right_mat = np.array(right_img)\n\t\t\tself.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n\n\t\t# Create request for stereo accelerator\n\t\treq = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n\t\treq.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n\t\treq.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n\t\treq.arg(self.distC_l_fl).arg(self.distC_r_fl)\n\t\treq.arg(self.irA_l_fl).arg(self.irA_r_fl)\n\t\treq.arg(self.bm_state_arr)\n\t\treq.arg(rows).arg(cols)\n\n\t\tself.response = inaccel.submit(req)\n\n\tdef wait(self):\n\t\t# Send request and wait for completion\n\t\tself.response.result()\n\n\t\t# Write output image\n\t\tdisp_mat_scaled = (self.disp_mat.view(np.ndarray)*(256.0 / 48.0) / (16.0)).astype(np.uint8)\n\n\t\tself.m_runEndTime = int(round(time.time() * 1000000))\n\t\treturn disp_mat_scaled;\n\n\tdef run(self, left_img, right_img):\n\t\tself.runAsync(left_img, right_img)\n\t\treturn self.wait()\n\n\tdef lastruntime(self):\n\t\tduration = self.m_runEndTime - self.m_runStartTime\n\t\treturn duration\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import os import attr import click import guitarpro import psutil ALL = object() @attr.s class GPTools: input_file = attr.ib() output_file = attr.ib() selected_track_numbers = attr.ib(default=None) selected_measure_numbers = attr.ib(default=None) selected_beat_numbers = attr.ib(default=None) song = None def parse(self): if self.input_file is None: self.input_file = self.find_clipboard() if self.output_file is None: self.output_file = self.input_file self.song = guitarpro.parse(self.input_file) if self.selected_track_numbers is None: if self.song.clipboard is not None: self.selected_track_numbers = list(range(self.song.clipboard.startTrack, self.song.clipboard.stopTrack+1)) else: self.selected_track_numbers = ALL if self.selected_measure_numbers is None: if self.song.clipboard is not None: self.selected_measure_numbers = list(range(self.song.clipboard.startMeasure, self.song.clipboard.stopMeasure+1)) else: self.selected_measure_numbers = ALL if self.selected_beat_numbers is None: if self.song.clipboard is not None and self.song.clipboard.subBarCopy: self.selected_beat_numbers = list(range(self.song.clipboard.startBeat, self.song.clipboard.stopBeat+1)) else: self.selected_beat_numbers = ALL def find_clipboard(self): for process in psutil.process_iter(): if process.name().lower() != 'gp5.exe': continue break else: raise click.ClickException('cannot get Guitar Pro 5 clipboard, is the process running?') exe_path = process.cmdline()[0] clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp', 'clipboard.tmp') return clipboard_path def write(self): format = None if self.song.clipboard is None else 'tmp' guitarpro.write(self.song, self.output_file, format=format) def selected(self): for track in self.selected_tracks(): for measure in self.selected_measures(track): for voice in measure.voices: for beat in self.selected_beats(voice): yield track, measure, voice, beat def selected_tracks(self): if self.selected_track_numbers is ALL: yield from self.song.tracks return for track in self.song.tracks: if track.number in self.selected_track_numbers: yield track def selected_measures(self, track): if self.selected_measure_numbers is ALL: yield from track.measures return for measure in track.measures: if measure.number in self.selected_measure_numbers: yield measure def selected_beats(self, voice): if self.selected_beat_numbers is ALL: yield from voice.beats return for number, beat in enumerate(voice.beats, start=1): if number in self.selected_beat_numbers: yield beat
normal
{ "blob_id": "c6821cb8dd6f8d74ca20c03f87dae321eb869c32", "index": 2454, "step-1": "<mask token>\n\n\[email protected]\nclass GPTools:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n <mask token>\n", "step-2": "<mask token>\n\n\[email protected]\nclass GPTools:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in 
psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n", "step-3": "<mask token>\n\n\[email protected]\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n song = None\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def 
selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n", "step-4": "import os\nimport attr\nimport click\nimport guitarpro\nimport psutil\nALL = object()\n\n\[email protected]\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n song = None\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n", "step-5": "import os\n\nimport attr\nimport click\nimport guitarpro\nimport psutil\n\nALL = object()\n\n\[email protected]\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n\n song = None\n\n def parse(self):\n if self.input_file is 
None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n\n self.song = guitarpro.parse(self.input_file)\n\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.clipboard.startTrack, self.song.clipboard.stopTrack+1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.clipboard.startMeasure, self.song.clipboard.stopMeasure+1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if self.song.clipboard is not None and self.song.clipboard.subBarCopy:\n self.selected_beat_numbers = list(range(self.song.clipboard.startBeat, self.song.clipboard.stopBeat+1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException('cannot get Guitar Pro 5 clipboard, is the process running?')\n\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp', 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n", "step-ids": [ 7, 8, 9, 11, 12 ] }
[ 7, 8, 9, 11, 12 ]
# 213. 打家劫舍 II # 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,这意味着第一个房屋和最后一个房屋是紧挨着的。 # 同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警 。 # 给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,能够偷窃到的最高金额。 class Solution: # 86.24%, 15.46% def rob(self, nums) -> int: n = len(nums) if n == 0: return 0 if n == 1: return nums[0] return max(self.helper(nums[1:],n-1),self.helper(nums[:-1],n-1)) def helper(self,nums,n): if n == 1: return nums[0] dp = [0] * n dp[0] = nums[0] dp[1] = max(nums[0], nums[1]) for i in range(2, n): dp[i] = max(dp[i - 1], dp[i - 2] + nums[i]) return dp[n-1] # 优秀解答 def rob2(self, nums) -> int: n = len(nums) if nums == []: return 0 if len(nums) == 1: return nums[0] # 抢了 dp = [[0, 0] for _ in range(n)] dp[0][1] = nums[0] dp[0][0] = float('-inf') for i in range(1, n): dp[i][1] = dp[i - 1][0] + nums[i] dp[i][0] = max(dp[i - 1][0], dp[i - 1][1]) tmp_max = dp[n - 1][0] # 没抢 dp = [[0, 0] for _ in range(n)] dp[0][1] = float('-inf') dp[0][0] = 0 for i in range(1, n): dp[i][1] = dp[i - 1][0] + nums[i] dp[i][0] = max(dp[i - 1][0], dp[i - 1][1]) return max(dp[n - 1][0], dp[n - 1][1], tmp_max)
normal
{ "blob_id": "59b2c9d279168a806e59fb7529ab12d7b86107bc", "index": 5340, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n <mask token>\n", "step-3": "class Solution:\n <mask token>\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n\n def rob2(self, nums) ->int:\n n = len(nums)\n if nums == []:\n return 0\n if len(nums) == 1:\n return nums[0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = nums[0]\n dp[0][0] = float('-inf')\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n tmp_max = dp[n - 1][0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = float('-inf')\n dp[0][0] = 0\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\n", "step-4": "class Solution:\n\n def rob(self, nums) ->int:\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n return max(self.helper(nums[1:], n - 1), self.helper(nums[:-1], n - 1))\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n\n def rob2(self, nums) ->int:\n n = len(nums)\n if nums == []:\n return 0\n if len(nums) == 1:\n return nums[0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = nums[0]\n dp[0][0] = float('-inf')\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n tmp_max = dp[n - 1][0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = float('-inf')\n dp[0][0] = 0\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\n", "step-5": "# 213. 打家劫舍 II\r\n# 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,这意味着第一个房屋和最后一个房屋是紧挨着的。\r\n# 同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警 。\r\n# 给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,能够偷窃到的最高金额。\r\n\r\nclass Solution:\r\n # 86.24%, 15.46%\r\n def rob(self, nums) -> int:\r\n n = len(nums)\r\n if n == 0:\r\n return 0\r\n if n == 1:\r\n return nums[0]\r\n return max(self.helper(nums[1:],n-1),self.helper(nums[:-1],n-1))\r\n\r\n\r\n def helper(self,nums,n):\r\n if n == 1:\r\n return nums[0]\r\n dp = [0] * n\r\n dp[0] = nums[0]\r\n dp[1] = max(nums[0], nums[1])\r\n for i in range(2, n):\r\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\r\n return dp[n-1]\r\n\r\n # 优秀解答\r\n def rob2(self, nums) -> int:\r\n n = len(nums)\r\n if nums == []:\r\n return 0\r\n if len(nums) == 1:\r\n return nums[0]\r\n # 抢了\r\n dp = [[0, 0] for _ in range(n)]\r\n dp[0][1] = nums[0]\r\n dp[0][0] = float('-inf')\r\n for i in range(1, n):\r\n dp[i][1] = dp[i - 1][0] + nums[i]\r\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\r\n tmp_max = dp[n - 1][0]\r\n\r\n # 没抢\r\n dp = [[0, 0] for _ in range(n)]\r\n dp[0][1] = float('-inf')\r\n dp[0][0] = 0\r\n for i in range(1, n):\r\n dp[i][1] = dp[i - 1][0] + nums[i]\r\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\r\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\r\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from random import random, randint, choice

from copy import deepcopy
from math import log

"""
A wrapper for the functions that will sit in the nodes representing
functions. Its members are the function's name, the function itself
and the number of parameters it takes.
"""


class fwrapper:
    def __init__(self, function, childcount, name):
        self.function = function
        self.childcount = childcount
        self.name = name


"""
The class for function nodes (nodes that have children). It is initialized with an fwrapper instance.
The evaluate method evaluates the child nodes and passes their results as parameters
to the function represented by this node.
"""


class node:
    def __init__(self, fw, children):
        self.function = fw.function
        self.name = fw.name
        self.children = children

    def evaluate(self, inp):
        results = [n.evaluate(inp) for n in self.children]
        return self.function(results)

    # The display method prints a string representation of the tree
    def display(self, indent=0):
        print((' ' * indent) + self.name)
        for c in self.children:
            c.display(indent + 1)


"""
The class for nodes that simply return one of the parameters passed to the program.
Its evaluate method returns the parameter selected by idx.
"""


class paramnode:
    def __init__(self, idx):
        self.idx = idx

    def evaluate(self, inp):
        return inp[self.idx]

    # This method just prints the index of the parameter it returns
    def display(self, indent=0):
        print('%sp%d' % (' ' * indent, self.idx))


"""
Nodes that return constants. The evaluate method simply returns the value the instance was initialized with.
"""


class constnode:
    def __init__(self, v):
        self.v = v

    def evaluate(self, inp):
        return self.v

    def display(self, indent=0):
        print('%s%d' % (' ' * indent, self.v))


"""
Simple functions such as add and subtract can be defined inline with lambda expressions.
The rest have to be written in a separate block.
Either way, the function is wrapped in an fwrapper instance
together with its name and its number of parameters.
"""

addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')


def iffunc(l):
    if l[0] > 0:
        return l[1]
    else:
        return l[2]


ifw = fwrapper(iffunc, 3, 'if')


def isgreater(l):
    if l[0] > l[1]:
        return 1
    else:
        return 0


gtw = fwrapper(isgreater, 2, 'isgreater')

# This line builds a list of all the functions so that elements
# can later be picked from it at random.
flist = [addw, mulw, ifw, gtw, subw]


# A program tree can be built with the node class (as an example)
def exampletree():
    return node(ifw, [
        node(gtw, [paramnode(0), constnode(3)]),
        node(addw, [paramnode(1), constnode(5)]),
        node(subw, [paramnode(1), constnode(2)]),
    ]
                )


"""
This function creates a node containing a randomly chosen function and checks
how many parameters that function requires. For every child node it calls itself recursively
to create a new node. The whole tree is built this way, and a branch stops growing the moment
the current node has no children (that is, it represents either a constant or a parameter variable).
The pc parameter is the number of inputs the tree takes. The fpr parameter gives the probability
that a newly created node is a function node, and ppr the probability that a node which is not
a function node is a paramnode.
"""


def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
    if random() < fpr and maxdepth > 0:
        f = choice(flist)
        children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
                    for i in range(f.childcount)]
        return node(f, children)
    elif random() < ppr:
        return paramnode(randint(0, pc - 1))
    else:
        return constnode(randint(0, 10))


def hiddenfunction(x, y):
    return x ** 2 + 2 * y + 3 * x + 5


def buildhiddenset():
    rows = []
    for i in range(200):
        x = randint(0, 40)
        y = randint(0, 40)
        rows.append([x, y, hiddenfunction(x, y)])
    return rows


"""
This function walks over every row of the dataset, evaluates the function on the arguments
given in the row and compares it with the expected result. The absolute differences are summed.
The smaller the sum, the better the program; a score of 0 means every result matched exactly.
"""


def scorefunction(tree, s):
    dif = 0
    for data in s:
        v = tree.evaluate([data[0], data[1]])
        dif += abs(v - data[2])
    return dif


"""
This function starts at the root of the tree and decides whether the node should be changed.
If not, it calls mutate recursively on the child nodes.
It may happen that every node gets mutated, and sometimes the tree is not changed at all.
"""


# Mutation by replacing a subtree
def mutate(t, pc, probchange=0.1):
    if random() < probchange:
        return makerandomtree(pc)
    else:
        result = deepcopy(t)
        if hasattr(t, "children"):
            result.children = [mutate(c, pc, probchange) for c in t.children]
        return result


"""
The crossover function is given two trees and traverses both of them.
If a randomly chosen number does not exceed the threshold probability,
the function returns a copy of the first tree in which one of the branches has been replaced
by a branch taken from the second tree.
Because the trees are traversed in parallel, the crossover happens at roughly the same level of each tree.
"""


# Crossover function. Two successful programs are combined to produce a new program.
def crossover(t1, t2, probswap=0.7, top=1):
    if random() < probswap and not top:
        return deepcopy(t2)
    else:
        result = deepcopy(t1)
        if hasattr(t1, 'children') and hasattr(t2, 'children'):
            result.children = [crossover(c, choice(t2.children), probswap, 0)
                               for c in t1.children]
        return result


# Returns a ranking function for the given dataset
def getrankfunction(dataset):
    def rankfunction(population):
        scores = [(scorefunction(t, dataset), t) for t in population]
        # Sort by score only; with a plain sort(), tied scores would make
        # Python 3 try to compare the tree objects themselves and raise a TypeError
        scores.sort(key=lambda s: s[0])
        return scores

    return rankfunction


"""
Sets up the competitive environment in which the programs will evolve.
The idea is to create a set of random programs, select the best of them for copying and modification,
and repeat the process until some stopping condition is met.
"""


def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):
    """This function creates a random initial population and then runs at most maxgen iterations of the loop,
    calling rankfunction each time to rank the programs from best to worst.
    The best program automatically makes it into the next generation unchanged.

    Args:
        rankfunction: The function used to rank the list of programs from best to worst.
        mutationrate: The probability of a mutation, passed on to mutate.
        breedingrate: The probability of crossover, passed on to crossover.
        popsize: The size of the initial population.
        pexp: The rate of decline in the probability of selecting lower-ranked programs.
            The higher the value, the harsher the selection process.
        pnew: The probability that a completely new, randomly generated program
            is included in the new population.

    Returns:
        tuple: The best match found

    """

    # Returns a random number, favoring smaller numbers.
    # The lower the value of pexp, the larger the share of small numbers.
    def selectindex():
        return int(log(random()) / log(pexp))

    # Create a random initial population
    population = [makerandomtree(pc) for i in range(popsize)]
    for i in range(maxgen):
        scores = rankfunction(population)
        print(scores[0][0])
        if scores[0][0] == 0:
            break

        # The two best individuals are always selected
        newpop = [scores[0][1], scores[1][1]]

        # Build the next generation
        while len(newpop) < popsize:
            if random() > pnew:
                newpop.append(mutate(
                    crossover(scores[selectindex()][1],
                              scores[selectindex()][1],
                              probswap=breedingrate),
                    pc, probchange=mutationrate))
            else:
                # Add a random node to introduce some uncertainty
                newpop.append(makerandomtree(pc))

        population = newpop
    scores[0][1].display()
    return scores[0][1]

#[
#  (10, "program1"),
#  (17, "program2"),
#]

def gridgame(p):
    # Board size
    max = (3, 3)

    # Remember each player's last move
    lastmove = [-1, -1]

    # Remember the players' positions
    location = [[randint(0, max[0]), randint(0, max[1])]]

    # Put the second player far enough away from the first
    location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
    # At most 50 moves before a draw is declared
    for o in range(50):

        # For each player
        for i in range(2):
            locs = location[i][:] + location[1 - i][:]
            locs.append(lastmove[i])
            move = p[i].evaluate(locs) % 4

            # If a player moves in the same direction twice in a row,
            # they lose
            if lastmove[i] == move:
                return 1 - i
            lastmove[i] = move
            if move == 0:
                location[i][0] -= 1
                # The board is bounded
                if location[i][0] < 0:
                    location[i][0] = 0
            if move == 1:
                location[i][0] += 1
                if location[i][0] > max[0]:
                    location[i][0] = max[0]
            if move == 2:
                location[i][1] -= 1
                if location[i][1] < 0:
                    location[i][1] = 0
            if move == 3:
                location[i][1] += 1
                if location[i][1] > max[1]:
                    location[i][1] = max[1]

            # If the opponent has been captured, you win
            if location[i] == location[1 - i]:
                return i
    return -1


def tournament(pl):
    # Array for counting losses
    losses = [0 for p in pl]

    # Every player plays every other player
    for i in range(len(pl)):
        for j in range(len(pl)):
            if i == j:
                continue

            # Who won?
            winner = gridgame([pl[i], pl[j]])

            # Two points for a loss, one for a draw
            if winner == 0:
                losses[j] += 2
            elif winner == 1:
                losses[i] += 2
            elif winner == -1:
                losses[i] += 1
                losses[i] += 1
            pass

    # Sort and return the results
    z = list(zip(losses, pl))
    z.sort(key=lambda t: t[0])
    # input()
    print(z[0][1].display(indent=4))

    return z


class humanplayer:
    def evaluate(self, board):

        # Get my position and the positions of the other players
        me = tuple(board[0:2])
        others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]

        # Draw the board
        for i in range(4):
            for j in range(4):
                if (i, j) == me:
                    print('O', end=' ')
                elif (i, j) in others:
                    print('X', end=' ')
                else:
                    print('.', end=' ')
            print()

        # Show the moves, for reference
        print('Your last move was %d' % board[len(board) - 1])
        print(' 0')
        print('2 3')
        print(' 1')
        print('Enter move: ')

        # Return the number entered by the user
        move = int(input())
        return move


class fwrapper:
    def __init__(self, function, params, name):
        self.function = function
        self.childcount = params
        self.name = name


# flist={'str':[substringw,concatw],'int':[indexw]}
flist = [addw, mulw, ifw, gtw, subw]
normal
{ "blob_id": "89881f3cc6703b3f43f5d2dae87fa943d8a21513", "index": 5485, "step-1": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет 
не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef 
isgreater(l):\n if l[0] > l[1]:\n return 1\n else:\n return 0\n\n\n<mask token>\n\n\ndef exampletree():\n return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [\n paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]\n )\n\n\n<mask token>\n\n\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\n if random() < fpr and maxdepth > 0:\n f = choice(flist)\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in\n range(f.childcount)]\n return node(f, children)\n elif random() < ppr:\n return paramnode(randint(0, pc - 1))\n else:\n return constnode(randint(0, 10))\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef isgreater(l):\n if l[0] > l[1]:\n return 1\n else:\n return 0\n\n\n<mask token>\n\n\ndef exampletree():\n return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [\n paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]\n )\n\n\n<mask token>\n\n\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\n if random() < fpr and maxdepth > 0:\n f = choice(flist)\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in\n range(f.childcount)]\n return node(f, children)\n elif random() < ppr:\n return paramnode(randint(0, pc - 1))\n else:\n 
return constnode(randint(0, 10))\n\n\ndef hiddenfunction(x, y):\n return x ** 2 + 2 * y + 3 * x + 5\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef crossover(t1, t2, probswap=0.7, top=1):\n if random() < probswap and not top:\n return deepcopy(t2)\n else:\n result = deepcopy(t1)\n if hasattr(t1, 'children') and hasattr(t2, 'children'):\n result.children = [crossover(c, choice(t2.children), probswap, \n 0) for c in t1.children]\n return result\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\ndef gridgame(p):\n max = 3, 3\n lastmove = [-1, -1]\n location = [[randint(0, max[0]), randint(0, max[1])]]\n location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])\n for o in range(50):\n for i in range(2):\n locs = location[i][:] + location[1 - i][:]\n locs.append(lastmove[i])\n move = p[i].evaluate(locs) % 4\n if lastmove[i] == move:\n return 1 - i\n lastmove[i] = move\n if move == 0:\n location[i][0] -= 1\n if location[i][0] < 0:\n location[i][0] = 0\n if move == 1:\n location[i][0] += 1\n if location[i][0] > max[0]:\n location[i][0] = max[0]\n if move == 2:\n location[i][1] -= 1\n if location[i][1] < 0:\n location[i][1] = 0\n if move == 3:\n location[i][1] += 1\n if location[i][1] > max[1]:\n location[i][1] = max[1]\n if location[i] == location[1 - i]:\n return i\n return -1\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n", "step-5": "from random import random, randint, choice\r\nfrom copy import deepcopy\r\nfrom math import log\r\n\r\n\"\"\"\r\nОбертка для функций, которые будут находиться в узлах,\r\nпредставляющих функции. Его члены – имя функции, сама функция\r\nи количество принимаемых параметров.\r\n\"\"\"\r\nclass fwrapper:\r\n def __init__(self, function, childcount, name):\r\n self.function = function\r\n self.childcount = childcount\r\n self.name = name\r\n\r\n\"\"\"\r\nКласс функциональных узлов (имеющих потомков). 
Инициализируется экземпляром класса fwrapper.\r\nМетод evaluate вычисляет значения дочерних узлов и передает их представленной данным узлом\r\nфункции в качестве параметров.\r\n\"\"\"\r\nclass node:\r\n def __init__(self, fw, children):\r\n self.function = fw.function\r\n self.name = fw.name\r\n self.children = children\r\n\r\n def evaluate(self, inp):\r\n results = [n.evaluate(inp) for n in self.children]\r\n return self.function(results)\r\n \r\n # Метод display выводит представление дерева в виде строки\r\n def display(self, indent=0):\r\n print((' ' * indent) + self.name)\r\n for c in self.children:\r\n c.display(indent + 1)\r\n\r\n\"\"\"\r\nКласс узлов, которые просто возвращают один из переданных программе параметров.\r\nЕго метод evaluate возвращает параметр, соответствующий значению idx.\r\n\"\"\"\r\nclass paramnode:\r\n def __init__(self, idx):\r\n self.idx = idx\r\n\r\n def evaluate(self, inp):\r\n return inp[self.idx]\r\n \r\n # Это метод просто печатает индекс возвращаемого параметра\r\n def display(self, indent=0):\r\n print('%sp%d' % (' ' * indent, self.idx))\r\n\r\n\"\"\"\r\nУзлы, возвращающие константы. Метод evaluate просто возвращает\r\nто значение, которым экземпляр был инициализирован.\r\n\"\"\"\r\nclass constnode:\r\n def __init__(self, v):\r\n self.v = v\r\n\r\n def evaluate(self, inp):\r\n return self.v\r\n\r\n def display(self, indent=0):\r\n print('%s%d' % (' ' * indent, self.v))\r\n\r\n \r\n\"\"\"\r\nПростые функции типа add и subtract можно встроить с помощью лямбда-выражений.\r\nДля остальных функцию придется написать в отдельном блоке.\r\nВ любом случае функция обертывается в экземпляр класса fwrapper \r\nвместе со своим именем и числом параметров.\r\n\"\"\"\r\n\r\naddw = fwrapper(lambda l: l[0] + l[1], 2, 'add')\r\nsubw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')\r\nmulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')\r\n\r\n\r\ndef iffunc(l):\r\n if l[0] > 0:\r\n return l[1]\r\n else:\r\n return l[2]\r\n\r\n\r\nifw = fwrapper(iffunc, 3, 'if')\r\n\r\n\r\ndef isgreater(l):\r\n if l[0] > l[1]:\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ngtw = fwrapper(isgreater, 2, 'isgreater')\r\n\r\n# В этой строке создается список всех функций, чтобы впоследствии из него\r\n# можно было выбирать элементы случайным образом.\r\nflist = [addw, mulw, ifw, gtw, subw]\r\n\r\n# C помощью класса node можно построить дерево программы (в качестве примера)\r\ndef exampletree():\r\n return node(ifw, [\r\n node(gtw, [paramnode(0), constnode(3)]),\r\n node(addw, [paramnode(1), constnode(5)]),\r\n node(subw, [paramnode(1), constnode(2)]),\r\n ]\r\n )\r\n\r\n\r\n\"\"\"\r\nЭта функция создает узел, содержащий случайно выбранную функцию, и проверяет,\r\nсколько у этой функции должно быть параметров. Для каждого дочернего узла функция\r\nвызывает себя рекурсивно, чтобы создать новый узел. Так конструируется все дерево,\r\nпричем процесс построения ветвей завершается в тот момент, когда у очередного узла \r\nнет дочерних (то есть он представляет либо константу, либо переменную-параметр).\r\nПараметр pc равен числу параметров, принимаемых деревом на входе. 
Параметр fpr\r\nзадает вероятность того, что вновь создаваемый узел будет соответствовать функции,\r\nа ppr – вероятность того, что узел, не являющийся функцией, будет иметь тип paramnode.\r\n\"\"\"\r\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\r\n if random() < fpr and maxdepth > 0:\r\n f = choice(flist)\r\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)\r\n for i in range(f.childcount)]\r\n return node(f, children)\r\n elif random() < ppr:\r\n return paramnode(randint(0, pc - 1))\r\n else:\r\n return constnode(randint(0, 10))\r\n\r\n\r\ndef hiddenfunction(x, y):\r\n return x ** 2 + 2 * y + 3 * x + 5\r\n\r\n\r\ndef buildhiddenset():\r\n rows = []\r\n for i in range(200):\r\n x = randint(0, 40)\r\n y = randint(0, 40)\r\n rows.append([x, y, hiddenfunction(x, y)])\r\n return rows\r\n\r\n\r\n\"\"\"\r\nЭта функция перебирает все строки набора данных, вычисляет функцию от указанных \r\nв ней аргументов и сравнивает с результатом. Абсолютные значения разностей суммируются.\r\nЧем меньше сумма, тем лучше программа, а значение 0 говорит о том, что все результаты \r\nв точности совпали. \r\n\"\"\"\r\ndef scorefunction(tree, s):\r\n dif = 0\r\n for data in s:\r\n v = tree.evaluate([data[0], data[1]])\r\n dif += abs(v - data[2])\r\n return dif\r\n\r\n\r\n\"\"\"\r\nЭта функция начинает с корня дерева и решает, следует ли изменить\r\nузел. Если нет, она рекурсивно вызывает mutate для дочерних узлов.\r\nМожет случиться, что мутации подвергнутся все узлы, а иногда дерево\r\nвообще не изменится.\r\n\"\"\"\r\n# Мутация путем замены поддерева\r\ndef mutate(t, pc, probchange=0.1):\r\n if random() < probchange:\r\n return makerandomtree(pc)\r\n else:\r\n result = deepcopy(t)\r\n if hasattr(t, \"children\"):\r\n result.children = [mutate(c, pc, probchange) for c in t.children]\r\n return result\r\n\r\n\"\"\"\r\nФункции, выполняющей скрещивание, передаются два дерева, и она\r\nобходит оба. Если случайно выбранное число не превышает пороговой\r\nвероятности, то функция возвращает копию первого дерева, в которой\r\nодна из ветвей заменена какой-то ветвью, взятой из второго дерева.\r\nПоскольку обход выполняется параллельно, то скрещивание произойдет примерно на одном уровне каждого дерева.\r\n\"\"\"\r\n# Функция скрещивания. 
Две успешные программы комбинируются с целью получения новой программы.\r\ndef crossover(t1, t2, probswap=0.7, top=1):\r\n if random() < probswap and not top:\r\n return deepcopy(t2)\r\n else:\r\n result = deepcopy(t1)\r\n if hasattr(t1, 'children') and hasattr(t2, 'children'):\r\n result.children = [crossover(c, choice(t2.children), probswap, 0)\r\n for c in t1.children]\r\n return result\r\n\r\n# Функция возвращает функцию ранжирования для имеющегося набора данных\r\ndef getrankfunction(dataset):\r\n def rankfunction(population):\r\n scores = [(scorefunction(t, dataset), t) for t in population]\r\n scores.sort()\r\n return scores\r\n\r\n return rankfunction\r\n\r\n\r\n\"\"\"\r\nСоздание конкурентной среды, в которой программы будут эволюционировать.\r\nСмысл в том, чтобы создать набор случайных программ, отобрать из них\r\nнаилучшие для копирования и модификации и повторять процесс, пока не будет\r\nвыполнено некое условие останова.\r\n\"\"\"\r\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):\r\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\r\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\r\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \r\n Args:\r\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\r\n mutationrate: Вероятность мутации, передаваемая функции mutate.\r\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\r\n popsize: Размер исходной популяции.\r\n probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/\r\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\r\n\r\n Returns:\r\n tuple: Найденное наилучшее совпадние\r\n\r\n \"\"\"\r\n # Возвращает случайное число, отдавая предпочтение более маленьким числам.\r\n # Чем меньше значение pexp, тем больше будет доля маленьких чисел.\r\n def selectindex():\r\n return int(log(random()) / log(pexp))\r\n\r\n # Создаем случайную исходную популяцию\r\n population = [makerandomtree(pc) for i in range(popsize)]\r\n for i in range(maxgen):\r\n scores = rankfunction(population)\r\n print(scores[0][0])\r\n if scores[0][0] == 0: break\r\n\r\n # Две наилучшие особи отбираются всегда\r\n newpop = [scores[0][1], scores[1][1]]\r\n\r\n # Строим следующее поколение\r\n while len(newpop) < popsize:\r\n if random() > pnew:\r\n newpop.append(mutate(\r\n crossover(scores[selectindex()][1],\r\n scores[selectindex()][1],\r\n probswap=breedingrate),\r\n pc, probchange=mutationrate))\r\n else:\r\n # Добавляем случайный узел для внесения неопределенности\r\n newpop.append(makerandomtree(pc))\r\n\r\n population = newpop\r\n scores[0][1].display()\r\n return scores[0][1]\r\n\r\n#[\r\n# (10, \"program1\"),\r\n# (17, \"program2\"),\r\n#]\r\n\r\ndef gridgame(p):\r\n # Размер доски\r\n max = (3, 3)\r\n\r\n # Запоминаем последний ход каждого игрока\r\n lastmove = [-1, -1]\r\n\r\n # Запоминаем положения игроков\r\n location = [[randint(0, max[0]), randint(0, max[1])]]\r\n\r\n # Располагаем второго игрока на достаточном удалении от первого\r\n location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])\r\n # Не более 50 ходов до объявления ничьей\r\n for o in range(50):\r\n\r\n # Для каждого игрока\r\n for i in range(2):\r\n locs = location[i][:] + location[1 - 
i][:]\r\n locs.append(lastmove[i])\r\n move = p[i].evaluate(locs) % 4\r\n\r\n # Если игрок два раза подряд ходит в одном направлении, ему\r\n # засчитывается проигрыш\r\n if lastmove[i] == move: return 1 - i\r\n lastmove[i] = move\r\n if move == 0:\r\n location[i][0] -= 1\r\n # Доска ограничена\r\n if location[i][0] < 0: location[i][0] = 0\r\n if move == 1:\r\n location[i][0] += 1\r\n if location[i][0] > max[0]: location[i][0] = max[0]\r\n if move == 2:\r\n location[i][1] -= 1\r\n if location[i][1] < 0: location[i][1] = 0\r\n if move == 3:\r\n location[i][1] += 1\r\n if location[i][1] > max[1]: location[i][1] = max[1]\r\n\r\n # Если противник захвачен в плен, вы выиграли\r\n if location[i] == location[1 - i]: return i\r\n return -1\r\n\r\n\r\ndef tournament(pl):\r\n # Массив для подсчета проигрышей\r\n losses = [0 for p in pl]\r\n\r\n # Каждый игрок встречается со всеми другими\r\n for i in range(len(pl)):\r\n for j in range(len(pl)):\r\n if i == j: continue\r\n\r\n # Кто выиграл?\r\n winner = gridgame([pl[i], pl[j]])\r\n\r\n # Два очка за поражение, одно за ничью\r\n if winner == 0:\r\n losses[j] += 2\r\n elif winner == 1:\r\n losses[i] += 2\r\n elif winner == -1:\r\n losses[i] += 1\r\n losses[i] += 1\r\n pass\r\n\r\n # Отсортировать и вернуть результаты\r\n z = list(zip(losses, pl))\r\n z.sort(key=lambda t: t[0])\r\n # input()\r\n print(z[0][1].display(indent=4))\r\n return z\r\n\r\nclass humanplayer:\r\n def evaluate(self, board):\r\n\r\n # Получить мою позицию и позиции других игроков\r\n me = tuple(board[0:2])\r\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\r\n\r\n # Нарисовать доску\r\n for i in range(4):\r\n for j in range(4):\r\n if (i, j) == me:\r\n print('O',end=' ')\r\n elif (i, j) in others:\r\n print('X',end=' ')\r\n else:\r\n print('.',end=' ')\r\n print()\r\n\r\n # Показать ходы, для справки\r\n print('Your last move was %d' % board[len(board) - 1])\r\n print(' 0')\r\n print('2 3')\r\n print(' 1')\r\n print('Enter move: ')\r\n\r\n # Вернуть введенное пользователем число\r\n move = int(input())\r\n return move\r\n\r\n\r\nclass fwrapper:\r\n def __init__(self, function, params, name):\r\n self.function = function\r\n self.childcount = params\r\n self.name = name\r\n\r\n\r\n# flist={'str':[substringw,concatw],'int':[indexw]}\r\nflist = [addw, mulw, ifw, gtw, subw]\r\n", "step-ids": [ 23, 25, 28, 31, 34 ] }
[ 23, 25, 28, 31, 34 ]
def towers_of_hanoi(n, src, dest, temp, res):
    if n == 1:
        s = 'disk 1 from ', src, '->', dest
        res.append(s)
        # return the accumulated moves so a single-disk call also yields the list
        return res
    towers_of_hanoi(n - 1, src, temp, dest, res)
    s = 'disk ', n, ' from ', src, '->', dest
    res.append(s)
    towers_of_hanoi(n - 1, temp, dest, src, res)
    return res

def steps_in_tower_of_hanoi(no_of_disks):
    res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])
    return res

if __name__ == "__main__":

    no_of_disks = int(input())

    res = steps_in_tower_of_hanoi(no_of_disks)

    print('\n'.join([' '.join(map(str, x)) for x in res]))
    print('\n')
normal
{ "blob_id": "f23bfef2daf8fda4249435821dbc2e0b1846e3d6", "index": 9842, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n", "step-3": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n", "step-4": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\nif __name__ == '__main__':\n no_of_disks = int(input())\n res = steps_in_tower_of_hanoi(no_of_disks)\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n", "step-5": "\ndef towers_of_hanoi(n, src, dest, temp,res):\n if n==1:\n s = 'disk 1 from ',src,'->',dest\n res.append(s)\n return\n towers_of_hanoi(n-1, src, temp, dest, res)\n s = 'disk ',n, ' from ',src,'->',dest\n res.append(s)\n towers_of_hanoi(n-1, temp, dest, src, res)\n return res\n \ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B',[])\n return res\n\nif __name__ == \"__main__\":\n\n no_of_disks = int(input())\n\n res = steps_in_tower_of_hanoi(no_of_disks)\n\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
_base_ = "../model.py" model = dict( type="ImageClassifier", task="classification", pretrained=None, backbone=dict(), head=dict(in_channels=-1, loss=dict(type="CrossEntropyLoss", loss_weight=1.0), topk=(1, 5)), ) checkpoint_config = dict(type="CheckpointHookWithValResults")
normal
{ "blob_id": "8bd5eff12e68f7145676f5e089b51376a82ab489", "index": 3231, "step-1": "<mask token>\n", "step-2": "_base_ = '../model.py'\nmodel = dict(type='ImageClassifier', task='classification', pretrained=None,\n backbone=dict(), head=dict(in_channels=-1, loss=dict(type=\n 'CrossEntropyLoss', loss_weight=1.0), topk=(1, 5)))\ncheckpoint_config = dict(type='CheckpointHookWithValResults')\n", "step-3": "_base_ = \"../model.py\"\n\nmodel = dict(\n type=\"ImageClassifier\",\n task=\"classification\",\n pretrained=None,\n backbone=dict(),\n head=dict(in_channels=-1, loss=dict(type=\"CrossEntropyLoss\", loss_weight=1.0), topk=(1, 5)),\n)\n\ncheckpoint_config = dict(type=\"CheckpointHookWithValResults\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
num=5 a=5 for row in range(num,0,-1): for col in range(row,0,-1): print(a,end="") a-=1 print()
normal
{ "blob_id": "a567a2dc1dbb59979d849a5a772e4592910a9f27", "index": 2783, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor row in range(num, 0, -1):\n for col in range(row, 0, -1):\n print(a, end='')\n a -= 1\n print()\n", "step-3": "num = 5\na = 5\nfor row in range(num, 0, -1):\n for col in range(row, 0, -1):\n print(a, end='')\n a -= 1\n print()\n", "step-4": "num=5\r\na=5\r\nfor row in range(num,0,-1):\r\n for col in range(row,0,-1):\r\n print(a,end=\"\")\r\n a-=1\r\n print()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from airflow.plugins_manager import AirflowPlugin from flask import Blueprint, Flask from rest_api.log.views import views from rest_api.route.log_route import log from rest_api.route.mylog_route import my_log_pb from rest_api.route.native_log_route import native_log_bp class AirflowPlugin(AirflowPlugin): name = "airflow-plugin" operators = [] # Leave in for explicitness hooks = [] executors = [] macros = [] admin_views = [] flask_blueprints = [] menu_links = [] # 创建Blueprint实例 # Blueprint实例创建之后我们就可以通过@Blueprint实例名.route('/')语法为我们的模块创建路由 airflow_bp = Blueprint( 'airflow_bp', __name__ ) app = Flask(__name__) # 注册我们在views.py模块中创建的蓝图实例views, 并将他的URL前缀设置为`/views` app.register_blueprint(views, url_prefix='/views') app.register_blueprint(log, url_prefix='/') app.register_blueprint(native_log_bp, url_prefix='/native_log') app.register_blueprint(my_log_pb, url_prefix='/my_log') if __name__ == '__main__': app.run(debug=True)
normal
{ "blob_id": "39f1fc04911f8d22d07532add24cd1671a569e72", "index": 9414, "step-1": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n<mask token>\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\nairflow_bp = Blueprint('airflow_bp', __name__)\napp = Flask(__name__)\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from airflow.plugins_manager import AirflowPlugin\nfrom flask import Blueprint, Flask\nfrom rest_api.log.views import views\nfrom rest_api.route.log_route import log\nfrom rest_api.route.mylog_route import my_log_pb\nfrom rest_api.route.native_log_route import native_log_bp\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\nairflow_bp = Blueprint('airflow_bp', __name__)\napp = Flask(__name__)\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "from airflow.plugins_manager import AirflowPlugin\nfrom flask import Blueprint, Flask\nfrom rest_api.log.views import views\nfrom rest_api.route.log_route import log\nfrom rest_api.route.mylog_route import my_log_pb\nfrom rest_api.route.native_log_route import native_log_bp\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = \"airflow-plugin\"\n operators = []\n # Leave in for explicitness\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n# 创建Blueprint实例\n# Blueprint实例创建之后我们就可以通过@Blueprint实例名.route('/')语法为我们的模块创建路由\nairflow_bp = Blueprint(\n 'airflow_bp',\n __name__\n)\n\n\napp = Flask(__name__)\n\n# 注册我们在views.py模块中创建的蓝图实例views, 并将他的URL前缀设置为`/views`\napp.register_blueprint(views, url_prefix='/views')\n\napp.register_blueprint(log, url_prefix='/')\n\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\n\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.http import HttpResponse from django.views.decorators.http import require_http_methods from django.shortcuts import render, redirect from app.models import PaidTimeOff, Schedule from django.utils import timezone from django.contrib import messages from app.decorators import user_is_authenticated from app.views import utils @require_http_methods(["GET", "POST"]) @user_is_authenticated def index(request, user_id): user = utils.current_user(request) if not user: return HttpResponse("User " + str(user_id) + " NOT FOUND") pto = PaidTimeOff.objects.filter(user=user).first() if not pto: return HttpResponse("PTO " + str(user_id) + " NOT FOUND") if request.method == "GET": return index_get(request, user_id, user, pto) elif request.method == "POST": return index_post(request, user_id, user, pto) else: return HttpResponse("Invalid HTTP method") def index_get(request, user_id, user, pto): # pylint: disable=unused-argument schedules = Schedule.to_calendar((Schedule.objects.filter(pto=pto))) context = pto.__dict__ context.update({"schedules": schedules, "current_user": user}) return render(request, "users/paid_time_off.html", context=context) def index_post(request, user_id, user, pto): form = request.POST if not form: return HttpResponse("No form found") err_msg = PaidTimeOff.validate_PTO_form(form) if len(err_msg) > 0: messages.add_message(request, messages.INFO, err_msg) else: try: date_begin = Schedule.reformat(form['date_begin']) date_end = Schedule.reformat(form['date_end']) Schedule.objects.create( user=user, pto=pto, date_begin=date_begin, date_end=date_end, event_name=form['event_name'], event_type='PTO', event_desc=form['event_description'], created_at=timezone.now(), updated_at=timezone.now()) messages.add_message(request, messages.INFO, "Information successfully updated") except Exception as e: messages.add_message(request, messages.INFO, str(e)) url = "/users/%s/paid_time_off/" % user_id return redirect(url, permanent=False)
normal
{ "blob_id": "7245d4db6440d38b9302907a6203c1507c373112", "index": 6970, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % user_id\n return redirect(url, permanent=False)\n", "step-3": "<mask token>\n\n\n@require_http_methods(['GET', 'POST'])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse('User ' + str(user_id) + ' NOT FOUND')\n pto = PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse('PTO ' + str(user_id) + ' NOT FOUND')\n if request.method == 'GET':\n return index_get(request, user_id, user, pto)\n elif request.method == 'POST':\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse('Invalid HTTP method')\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % user_id\n return redirect(url, permanent=False)\n", "step-4": "from django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render, redirect\nfrom app.models import PaidTimeOff, Schedule\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom app.decorators import user_is_authenticated\nfrom app.views import utils\n\n\n@require_http_methods(['GET', 'POST'])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse('User ' + str(user_id) + ' NOT FOUND')\n pto = 
PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse('PTO ' + str(user_id) + ' NOT FOUND')\n if request.method == 'GET':\n return index_get(request, user_id, user, pto)\n elif request.method == 'POST':\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse('Invalid HTTP method')\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % user_id\n return redirect(url, permanent=False)\n", "step-5": "\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render, redirect\nfrom app.models import PaidTimeOff, Schedule\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom app.decorators import user_is_authenticated\nfrom app.views import utils\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse(\"User \" + str(user_id) + \" NOT FOUND\")\n pto = PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse(\"PTO \" + str(user_id) + \" NOT FOUND\")\n if request.method == \"GET\":\n return index_get(request, user_id, user, pto)\n elif request.method == \"POST\":\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse(\"Invalid HTTP method\")\n\n\ndef index_get(request, user_id, user, pto): # pylint: disable=unused-argument\n schedules = Schedule.to_calendar((Schedule.objects.filter(pto=pto)))\n context = pto.__dict__\n context.update({\"schedules\": schedules, \"current_user\": user})\n return render(request, \"users/paid_time_off.html\",\n context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse(\"No form found\")\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(\n user=user, pto=pto, date_begin=date_begin,\n date_end=date_end, event_name=form['event_name'],\n event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n \"Information successfully updated\")\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = \"/users/%s/paid_time_off/\" % user_id\n 
return redirect(url, permanent=False)\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
""" 给定两个非空链表来代表两个非负整数,位数按照逆序方式存储,它们的每个节点只存储单个数字。将这两数相加会返回一个新的链表。 你可以假设除了数字 0 之外,这两个数字都不会以零开头。 输入:(2 -> 4 -> 3) + (5 -> 6 -> 4) 输出:7 -> 0 -> 8 原因:342 + 465 = 807 """ """ 解题思路: 先计算两个节点的值和与进位的和 然后将值对10取余存放到新的链表中 循环下去 直到l1 l2 进位都不存在 """ # Definition for singly-linked list. class ListNode: def __init__(self, x): self.val = x self.next = None class Solution: def addTwoNumbers(self, l1, l2): """ :type l1: ListNode :type l2: ListNode :rtype: ListNode """ ret = ListNode(0) cur = ret add = 0 while l1 or l2 or add: val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add add = val // 10 cur.next = ListNode(val % 10) cur = cur.next l1 = l1.next if l1.next else None l2 = l2.next if l2.next else None return ret.next
normal
{ "blob_id": "80f681eb99d1e3f64cacd23ce0a4b10a74a79fe8", "index": 4223, "step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n ret = ListNode(0)\n cur = ret\n add = 0\n while l1 or l2 or add:\n val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add\n add = val // 10\n cur.next = ListNode(val % 10)\n cur = cur.next\n l1 = l1.next if l1.next else None\n l2 = l2.next if l2.next else None\n return ret.next\n", "step-3": "<mask token>\n\n\nclass ListNode:\n <mask token>\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n ret = ListNode(0)\n cur = ret\n add = 0\n while l1 or l2 or add:\n val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add\n add = val // 10\n cur.next = ListNode(val % 10)\n cur = cur.next\n l1 = l1.next if l1.next else None\n l2 = l2.next if l2.next else None\n return ret.next\n", "step-4": "<mask token>\n\n\nclass ListNode:\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n ret = ListNode(0)\n cur = ret\n add = 0\n while l1 or l2 or add:\n val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add\n add = val // 10\n cur.next = ListNode(val % 10)\n cur = cur.next\n l1 = l1.next if l1.next else None\n l2 = l2.next if l2.next else None\n return ret.next\n", "step-5": "\"\"\"\n给定两个非空链表来代表两个非负整数,位数按照逆序方式存储,它们的每个节点只存储单个数字。将这两数相加会返回一个新的链表。\n\n你可以假设除了数字 0 之外,这两个数字都不会以零开头。\n输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)\n输出:7 -> 0 -> 8\n原因:342 + 465 = 807\n\"\"\"\n\n\"\"\"\n解题思路:\n先计算两个节点的值和与进位的和\n然后将值对10取余存放到新的链表中\n循环下去\n直到l1 l2 进位都不存在\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n ret = ListNode(0)\n cur = ret\n add = 0\n while l1 or l2 or add:\n val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add\n add = val // 10\n cur.next = ListNode(val % 10)\n cur = cur.next\n l1 = l1.next if l1.next else None\n l2 = l2.next if l2.next else None\n return ret.next\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
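A minimal usage sketch for the addTwoNumbers solution above. The build_list helper and the printing loop are illustrative additions and not part of the original snippet.

def build_list(digits):
    # Build a linked list from digits given least-significant first.
    head = ListNode(digits[0])
    cur = head
    for d in digits[1:]:
        cur.next = ListNode(d)
        cur = cur.next
    return head


l1 = build_list([2, 4, 3])   # represents 342
l2 = build_list([5, 6, 4])   # represents 465
node = Solution().addTwoNumbers(l1, l2)
out = []
while node:
    out.append(str(node.val))
    node = node.next
print(' -> '.join(out))      # expected: 7 -> 0 -> 8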
from collections import OrderedDict


class LRU_Cache(object):
    def __init__(self, capacity):
        # Initialize class variables
        self.size = capacity
        self.jar = OrderedDict()
        pass

    def get(self, key):
        # Retrieve item from provided key. Return -1 if nonexistent.
        if key not in self.jar:
            return -1
        else:
            rtn = self.jar.get(key)
            self.jar.move_to_end(key)
            return rtn

    def set(self, key, value):
        # Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item.
        if key is None:
            return
        if len(self.jar) == self.size:
            self.jar.popitem(last=False)
            self.jar[key] = value
        else:
            self.jar[key] = value
        return

    def __str__(self):
        return f'{self.jar}'


def test_1():
    '''Basically testing to see if the cache can store and recall info'''
    our_cache = LRU_Cache(5)

    our_cache.set(1, 1)
    our_cache.set(2, 2)
    our_cache.set(3, 3)
    our_cache.set(4, 4)

    print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')


def test_2():
    '''testing to see if the least used object gets removed'''
    our_cache = LRU_Cache(5)

    our_cache.set(1, 1)
    our_cache.set(2, 2)
    our_cache.set(3, 3)
    our_cache.set(4, 4)
    our_cache.set(5, 5)

    our_cache.get(1)

    our_cache.set(6, 6)

    print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')


def test_3():
    '''entering null key to be set, should not work'''
    our_cache = LRU_Cache(5)

    [our_cache.set(None, 1) for _ in range(5)]

    print(f'Current Cache state: {our_cache} expected result is for it to be empty')


def test_4():
    '''0 capacity test case'''
    our_cache = LRU_Cache(0)

    [our_cache.set(None, 1) for _ in range(5)]

    print(f'Current Cache state: {our_cache} expected result is for it to be empty')


if __name__ == "__main__":
    test_1()
    test_2()
    test_3()
    test_4()
normal
{ "blob_id": "3c88e13e8796c5f39180a9a514f0528a074460a6", "index": 2198, "step-1": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\n<mask token>\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\n<mask token>\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n \"\"\"Basically testing to see if the cache can store and recall info\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = 
LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\nif __name__ == '__main__':\n test_1()\n test_2()\n test_3()\n test_4()\n", "step-4": "from collections import OrderedDict\n\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity):\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n\n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n \"\"\"Basically testing to see if the cache can store and recall info\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n \"\"\"testing to see if the least used object gets removed\"\"\"\n our_cache = LRU_Cache(5)\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5)\n our_cache.get(1)\n our_cache.set(6, 6)\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\n\ndef test_3():\n \"\"\"entering null key to be set, should not work\"\"\"\n our_cache = LRU_Cache(5)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\ndef test_4():\n \"\"\"0 capacity test case\"\"\"\n our_cache = LRU_Cache(0)\n [our_cache.set(None, 1) for _ in range(5)]\n print(\n f'Current Cache state: {our_cache} expected result is for it to be empty'\n )\n\n\nif __name__ == '__main__':\n test_1()\n test_2()\n test_3()\n test_4()\n", "step-5": "from collections import OrderedDict\nclass LRU_Cache(object):\n def __init__(self, capacity):\n # Initialize class variables\n self.size = capacity\n self.jar = OrderedDict()\n pass\n\n def get(self, key):\n # Retrieve item from provided key. Return -1 if nonexistent.\n if key not in self.jar:\n return -1\n else:\n rtn = self.jar.get(key)\n self.jar.move_to_end(key)\n return rtn\n\n def set(self, key, value):\n # Set the value if the key is not present in the cache. 
If the cache is at capacity remove the oldest item.\n if key is None:\n return\n if len(self.jar) == self.size:\n self.jar.popitem(last=False)\n self.jar[key] = value\n else:\n self.jar[key] = value\n return\n \n def __str__(self):\n return f'{self.jar}'\n\n\ndef test_1():\n '''Basically testing to see if the cache can store and recall info'''\n our_cache = LRU_Cache(5)\n\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n\n print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')\n\n\ndef test_2():\n '''testing to see if the least used object gets removed'''\n our_cache = LRU_Cache(5)\n\n our_cache.set(1, 1)\n our_cache.set(2, 2)\n our_cache.set(3, 3)\n our_cache.set(4, 4)\n our_cache.set(5, 5) \n\n our_cache.get(1)\n\n our_cache.set(6, 6)\n\n\n\n print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')\n\ndef test_3():\n '''entering null key to be set, should not work'''\n our_cache = LRU_Cache(5)\n\n [our_cache.set(None, 1) for _ in range(5)]\n\n print(f'Current Cache state: {our_cache} expected result is for it to be empty')\n\ndef test_4():\n '''0 capacity test case'''\n our_cache = LRU_Cache(0)\n\n [our_cache.set(None, 1) for _ in range(5)]\n\n print(f'Current Cache state: {our_cache} expected result is for it to be empty')\n\n \n\nif __name__ == \"__main__\":\n test_1()\n test_2()\n test_3()\n test_4()\n", "step-ids": [ 6, 8, 10, 11, 12 ] }
[ 6, 8, 10, 11, 12 ]
from queuingservices.managers.queue_lifecycle_manager import QueueLifecycleManager
from queuingservices.managers.queue_publisher_manager import QueuePublisherManager
from queuingservices.managers.queue_subscriber_manager import QueueSubscriberManager


class QueueMaster(QueueSubscriberManager, QueuePublisherManager, QueueLifecycleManager):
    """

    This class interfaces all types of queue objects that you might want.

    """
    pass
normal
{ "blob_id": "b2b961c6ff1d975d80a84be361321ab44dc026a0", "index": 2134, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass QueueMaster(QueueSubscriberManager, QueuePublisherManager,\n QueueLifecycleManager):\n <mask token>\n pass\n", "step-3": "<mask token>\n\n\nclass QueueMaster(QueueSubscriberManager, QueuePublisherManager,\n QueueLifecycleManager):\n \"\"\"\n\n This class interfaces all types of queue objects that you might want.\n\n \"\"\"\n pass\n", "step-4": "from queuingservices.managers.queue_lifecycle_manager import QueueLifecycleManager\nfrom queuingservices.managers.queue_publisher_manager import QueuePublisherManager\nfrom queuingservices.managers.queue_subscriber_manager import QueueSubscriberManager\n\n\nclass QueueMaster(QueueSubscriberManager, QueuePublisherManager,\n QueueLifecycleManager):\n \"\"\"\n\n This class interfaces all types of queue objects that you might want.\n\n \"\"\"\n pass\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.shortcuts import render, redirect
from .forms import TeacherForm, Teacher
from django.http import HttpResponse


def add_teacher(request):
    if request.method == "POST":
        form = TeacherForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("list_teachers")
        else:
            return HttpResponse("invalid data", status=400)
    else:
        form = TeacherForm()

    return render(request, "add_teacher.html", {"form": form})


def list_teachers(request):
    teachers = Teacher.objects.all()
    return render(request, "list_teachers.html", {"teachers": teachers})


def teacher_detail(request, pk):
    teacher = Teacher.objects.get(pk=pk)
    return render(request, "teacher_detail.html", {"teacher": teacher})


def edit_teacher(request, pk):
    teacher = Teacher.objects.get(pk=pk)

    if request.method == "POST":
        form = TeacherForm(request.POST, instance=teacher)
        if form.is_valid():
            form.save()
            return redirect("list_teachers")
    else:
        form = TeacherForm(instance=teacher)

    return render(request, "edit_teacher.html", {"form": form})
    # form = TeacherForm()
    # return render(request,"add_teacher.html",{"form":form})
# Create your views here.
normal
{ "blob_id": "cf97c87400649dd15e5d006707f9adfbd0c91b2c", "index": 4118, "step-1": "<mask token>\n\n\ndef teacher_detail(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n return render(request, 'teacher_detail.html', {'teacher': teacher})\n\n\ndef edit_teacher(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n if request.method == 'POST':\n form = TeacherForm(request.POST, instance=teacher)\n if form.is_valid:\n form.save()\n return redirect('list_teachers')\n else:\n form = TeacherForm(instance=teacher)\n return render(request, 'edit_teacher.html', {'form': form})\n", "step-2": "<mask token>\n\n\ndef list_teachers(request):\n teachers = Teacher.objects.all()\n return render(request, 'list_teachers.html', {'teachers': teachers})\n\n\ndef teacher_detail(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n return render(request, 'teacher_detail.html', {'teacher': teacher})\n\n\ndef edit_teacher(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n if request.method == 'POST':\n form = TeacherForm(request.POST, instance=teacher)\n if form.is_valid:\n form.save()\n return redirect('list_teachers')\n else:\n form = TeacherForm(instance=teacher)\n return render(request, 'edit_teacher.html', {'form': form})\n", "step-3": "<mask token>\n\n\ndef add_teacher(request):\n if request.method == 'POST':\n form = TeacherForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('list_teachers')\n else:\n return HttpResponse('invalid data', status=400)\n else:\n form = TeacherForm()\n return render(request, 'add_teacher.html', {'form': form})\n\n\ndef list_teachers(request):\n teachers = Teacher.objects.all()\n return render(request, 'list_teachers.html', {'teachers': teachers})\n\n\ndef teacher_detail(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n return render(request, 'teacher_detail.html', {'teacher': teacher})\n\n\ndef edit_teacher(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n if request.method == 'POST':\n form = TeacherForm(request.POST, instance=teacher)\n if form.is_valid:\n form.save()\n return redirect('list_teachers')\n else:\n form = TeacherForm(instance=teacher)\n return render(request, 'edit_teacher.html', {'form': form})\n", "step-4": "from django.shortcuts import render\nfrom .forms import TeacherForm, Teacher\nfrom django.http import HttpResponse\n\n\ndef add_teacher(request):\n if request.method == 'POST':\n form = TeacherForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('list_teachers')\n else:\n return HttpResponse('invalid data', status=400)\n else:\n form = TeacherForm()\n return render(request, 'add_teacher.html', {'form': form})\n\n\ndef list_teachers(request):\n teachers = Teacher.objects.all()\n return render(request, 'list_teachers.html', {'teachers': teachers})\n\n\ndef teacher_detail(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n return render(request, 'teacher_detail.html', {'teacher': teacher})\n\n\ndef edit_teacher(request, pk):\n teacher = Teacher.objects.get(pk=pk)\n if request.method == 'POST':\n form = TeacherForm(request.POST, instance=teacher)\n if form.is_valid:\n form.save()\n return redirect('list_teachers')\n else:\n form = TeacherForm(instance=teacher)\n return render(request, 'edit_teacher.html', {'form': form})\n", "step-5": "from django.shortcuts import render\r\nfrom .forms import TeacherForm,Teacher\r\nfrom django.http import HttpResponse\r\n\r\n\r\ndef add_teacher(request):\r\n\tif request.method==\"POST\":\r\n\t\tform=TeacherForm(request.POST)\r\n\t\tif 
form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\treturn redirect(\"list_teachers\")\r\n\t\telse:\r\n\t\t\treturn HttpResponse(\"invalid data\",status=400)\r\n\t\t\r\n\telse:\r\n\t\tform=TeacherForm()\r\n\r\n\treturn render(request,\"add_teacher.html\",{\"form\":form})\r\n\r\ndef list_teachers(request):\r\n\tteachers=Teacher.objects.all()\r\n\treturn render(request, \"list_teachers.html\",{\"teachers\":teachers})\r\n\r\ndef teacher_detail(request, pk):\r\n\r\n\tteacher=Teacher.objects.get(pk=pk)\r\n\r\n\treturn render(request, \"teacher_detail.html\",{\"teacher\":teacher})\r\n\r\ndef edit_teacher(request, pk):\r\n\t\r\n\tteacher=Teacher.objects.get(pk=pk)\r\n\r\n\tif request.method== \"POST\":\r\n\t\tform=TeacherForm(request.POST, instance=teacher)\r\n\r\n\t\tif form.is_valid:\r\n\t\t\tform.save()\r\n\t\t\treturn redirect(\"list_teachers\")\r\n\r\n\telse:\r\n\t\tform=TeacherForm(instance=teacher)\r\n\r\n\treturn render(request, \"edit_teacher.html\",{\"form\":form})\r\n\t# form = TeacherForm()\r\n\t# return render(request,\"add_teacher.html\",{\"form\":form})\r\n# Create your views here.\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import rambench

rambench.perform_benchmark()
normal
{ "blob_id": "3d1f2130043613dc8d5bbd773edd96c87c355de9", "index": 3455, "step-1": "<mask token>\n", "step-2": "<mask token>\nrambench.perform_benchmark()\n", "step-3": "import rambench\nrambench.perform_benchmark()\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/python

import pyglet
from pyglet.gl import *

win = pyglet.window.Window()


@win.event
def on_draw():
    # Clear buffers
    glClear(GL_COLOR_BUFFER_BIT)

    # Draw outlines only
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)

    # Draw some stuff
    glBegin(GL_TRIANGLES)
    glVertex3i(0, 0, 0)
    glVertex3i(300, 0, 0)
    glVertex3i(0, 300, 0)
    glEnd()


pyglet.app.run()
normal
{ "blob_id": "86c4193ec0fee8a0c06858913ec8153fcf0df6d9", "index": 4114, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n", "step-3": "<mask token>\nwin = pyglet.window.Window()\n\n\[email protected]\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n", "step-4": "import pyglet\nfrom pyglet.gl import *\nwin = pyglet.window.Window()\n\n\[email protected]\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n glVertex3i(0, 0, 0)\n glVertex3i(300, 0, 0)\n glVertex3i(0, 300, 0)\n glEnd()\n\n\npyglet.app.run()\n", "step-5": "#!/usr/bin/python\n\nimport pyglet\nfrom pyglet.gl import *\n\nwin = pyglet.window.Window()\n\[email protected]\ndef on_draw():\n\n\t# Clear buffers\n\tglClear(GL_COLOR_BUFFER_BIT)\n\n\t# Draw outlines only\n\tglPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n\t# Draw some stuff\n\tglBegin(GL_TRIANGLES)\n\tglVertex3i(0, 0, 0)\n\tglVertex3i(300, 0, 0)\n\tglVertex3i(0, 300, 0)\n\tglEnd()\n\npyglet.app.run()\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
import re

text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
search_pattern = re.compile('nuts')
search_match_object = search_pattern.search(text)

if search_match_object:
    print(search_match_object.span())
    print(search_match_object.start())
    print(search_match_object.end())
    print(search_match_object.group())

# Other methods of pattern
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts'))  # The entire string must match
print(search_pattern.match('nuts...'))   # Start of the string must match
normal
{ "blob_id": "ef5d235f09eea827b240290218c397f880f1046d", "index": 4433, "step-1": "<mask token>\n", "step-2": "<mask token>\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n", "step-3": "<mask token>\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n", "step-4": "import re\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n", "step-5": "import re\n\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\n\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\n\n# Other methods of pattern\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts')) # The entire string must match\nprint(search_pattern.match('nuts...')) # Start of the string must match\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import inputoutput


def xor_encryption(source, destination, key):
    """
    Returns text encrypted or decrypted with xor

    Keyword arguments:
    source - path to file with text to be encrypted
    destination - path to the file where you want to save the result
    key - encryption key
    """
    text = inputoutput.read_from_file(source, "b")
    # text = read_from_file(source)
    key = bytearray(key, 'utf-8')
    result = bytearray()
    for i in range(len(text)):
        result.append(text[i] ^ key[i % len(key)])
    inputoutput.write_to_file(result, destination, "b")


# def write_to_file(data, filename):
#     """
#     Write binary data to file

#     Keyword arguments:
#     data - binary data to be written
#     filename - path to the file where you want to save the result
#     """
#     f = open(filename, 'wb')
#     f.write(data)
#     f.close()


# def read_from_file(filename):
#     """
#     Read binary data from file

#     Keyword arguments:
#     filename - path to the file where you want to save the result

#     Returns:
#     data - binary data from file
#     """
#     f = open(filename, 'rb')
#     data = f.read()
#     f.close()
#     return data


key = 'verystongk'
# Encryption
xor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)
# Decryption
xor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)
normal
{ "blob_id": "81774d3b4d9fbf22ed19e1cba7ec5e8e3707f51a", "index": 2076, "step-1": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\n<mask token>\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n", "step-3": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\nkey = 'verystongk'\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n", "step-4": "import inputoutput\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\nkey = 'verystongk'\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n", "step-5": "import inputoutput\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, \"b\")\n # text = read_from_file(source)\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, \"b\")\n\n\n# def write_to_file(data, filename):\n# \"\"\"\n# Write binary data to file\n\n# Keyword arguments:\n# data - binary data to be written\n# filename - path to the file where you want to save the 
result\n# \"\"\"\n# f = open(filename, 'wb')\n# f.write(data)\n# f.close()\n\n\n# def read_from_file(filename):\n# \"\"\"\n# Read binary data from file\n\n# Keyword arguments:\n# filename - path to the file where you want to save the result\n\n# Returns:\n# data - binary data from file\n# \"\"\"\n# f = open(filename, 'rb')\n# data = f.read()\n# f.close()\n# return data\n\n\nkey = 'verystongk'\n# Шифрование\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\n# Расшифрование\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
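A self-contained sketch of the property the xor_encryption calls above rely on: XOR-ing the bytes with the same repeating key twice restores the original, which is why the second call decrypts what the first one encrypted. The sample key and message are illustrative and the inputoutput module is not needed here.

key = b'verystongk'
msg = b'attack at dawn'

enc = bytes(b ^ key[i % len(key)] for i, b in enumerate(msg))
dec = bytes(b ^ key[i % len(key)] for i, b in enumerate(enc))

assert dec == msg  # applying the same XOR key twice is the identity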
aes_key = 'eR5ceExL4IpUUY2lqALN7gLXzo11jlXPOwTwFGwOO3h='
normal
{ "blob_id": "7112348631bc60767bfb79c7f6966fc9189c522b", "index": 7901, "step-1": "<mask token>\n", "step-2": "aes_key = 'eR5ceExL4IpUUY2lqALN7gLXzo11jlXPOwTwFGwOO3h='\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import datetime


def days_count(year, month, hour):
    point = datetime.datetime(year, month, hour, 0, 0, 0, 000000)
    now = datetime.datetime.now()
    interval_day = point - now
    return interval_day.days


messages = {
    '猫钰钰 五月有砖搬': '距离 猫钰钰 上岗还有 {} 天'.format(days_count(2019, 6, 1)),  # 6.1 上岗
    'AD Zh': '距离 AD Zh 换岗还有 {} 天'.format(days_count(2019, 6, 9)),  # 6.9
    'zzp': '距离 zzp 换岗还有 {} 天'.format(days_count(2019, 9, 1)),  # 9.1
    'cm': '距离 cm 换岗还有 {} 天'.format(days_count(2019, 7, 8)),  # 7.8
    '小皮': '距离 小皮 下岗还有 {} 天'.format(days_count(2019, 7, 15)),  # 7.15
}

group_threshold = 100

person_threshold_1 = 20

person_threshold_2 = 40

person_threshold_3 = 50

warning_1 = '@{},你今天发言已经到达 {} 次,不好好干活,就知道吹逼!'

warning_2 = '@{},你今天发言已经到达 {} 次,吹这么多逼,!'

warning_3 = '@{},你今天发言已经到达 {} 次,你已经无敌了,我已经管不了你了!'


redis_config = {
    'host': '',
    'port': 6379,
    'decode_responses': True,
    'db': 2,
    'password': 1,
}
normal
{ "blob_id": "82ce6304977d468945526824ade1500e10d25d09", "index": 2872, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef days_count(year, month, hour):\n point = datetime.datetime(year, month, hour, 0, 0, 0, 0)\n now = datetime.datetime.now()\n interval_day = point - now\n return interval_day.days\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef days_count(year, month, hour):\n point = datetime.datetime(year, month, hour, 0, 0, 0, 0)\n now = datetime.datetime.now()\n interval_day = point - now\n return interval_day.days\n\n\nmessages = {'猫钰钰 五月有砖搬': '距离 猫钰钰 上岗还有 {} 天'.format(days_count(2019, 6, 1)),\n 'AD Zh': '距离 AD Zh 换岗还有 {} 天'.format(days_count(2019, 6, 9)), 'zzp':\n '距离 zzp 换岗还有 {} 天'.format(days_count(2019, 9, 1)), 'cm':\n '距离 cm 换岗还有 {} 天'.format(days_count(2019, 7, 8)), '小皮':\n '距离 小皮 下岗还有 {} 天'.format(days_count(2019, 7, 15))}\ngroup_threshold = 100\nperson_threshold_1 = 20\nperson_threshold_2 = 40\nperson_threshold_3 = 50\nwarning_1 = '@{},你今天发言已经到达 {} 次,不好好干活,就知道吹逼!'\nwarning_2 = '@{},你今天发言已经到达 {} 次,吹这么多逼,!'\nwarning_3 = '@{},你今天发言已经到达 {} 次,你已经无敌了,我已经管不了你了!'\nredis_config = {'host': '', 'port': 6379, 'decode_responses': True, 'db': 2,\n 'password': 1}\n", "step-4": "import datetime\n\n\ndef days_count(year, month, hour):\n point = datetime.datetime(year, month, hour, 0, 0, 0, 0)\n now = datetime.datetime.now()\n interval_day = point - now\n return interval_day.days\n\n\nmessages = {'猫钰钰 五月有砖搬': '距离 猫钰钰 上岗还有 {} 天'.format(days_count(2019, 6, 1)),\n 'AD Zh': '距离 AD Zh 换岗还有 {} 天'.format(days_count(2019, 6, 9)), 'zzp':\n '距离 zzp 换岗还有 {} 天'.format(days_count(2019, 9, 1)), 'cm':\n '距离 cm 换岗还有 {} 天'.format(days_count(2019, 7, 8)), '小皮':\n '距离 小皮 下岗还有 {} 天'.format(days_count(2019, 7, 15))}\ngroup_threshold = 100\nperson_threshold_1 = 20\nperson_threshold_2 = 40\nperson_threshold_3 = 50\nwarning_1 = '@{},你今天发言已经到达 {} 次,不好好干活,就知道吹逼!'\nwarning_2 = '@{},你今天发言已经到达 {} 次,吹这么多逼,!'\nwarning_3 = '@{},你今天发言已经到达 {} 次,你已经无敌了,我已经管不了你了!'\nredis_config = {'host': '', 'port': 6379, 'decode_responses': True, 'db': 2,\n 'password': 1}\n", "step-5": "import datetime\n\n\ndef days_count(year, month, hour):\n point = datetime.datetime(year, month, hour, 0, 0, 0, 000000)\n now = datetime.datetime.now()\n interval_day = point - now\n return interval_day.days\n\n\nmessages = {\n '猫钰钰 五月有砖搬': '距离 猫钰钰 上岗还有 {} 天'.format(days_count(2019, 6, 1)), # 6.1 上岗\n 'AD Zh': '距离 AD Zh 换岗还有 {} 天'.format(days_count(2019, 6, 9)), # 6.9\n 'zzp': '距离 zzp 换岗还有 {} 天'.format(days_count(2019, 9, 1)), # 9.1\n 'cm': '距离 cm 换岗还有 {} 天'.format(days_count(2019, 7, 8)), # 7.8\n '小皮': '距离 小皮 下岗还有 {} 天'.format(days_count(2019, 7, 15)), # 7.15\n}\n\n\ngroup_threshold = 100\n\nperson_threshold_1 = 20\n\nperson_threshold_2 = 40\n\nperson_threshold_3 = 50\n\nwarning_1 = '@{},你今天发言已经到达 {} 次,不好好干活,就知道吹逼!'\n\nwarning_2 = '@{},你今天发言已经到达 {} 次,吹这么多逼,!'\n\nwarning_3 = '@{},你今天发言已经到达 {} 次,你已经无敌了,我已经管不了你了!'\n\n\nredis_config = {\n 'host': '',\n 'port': 6379,\n 'decode_responses': True,\n 'db': 2,\n 'password': 1,\n}\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# from sklearn import tree
# import joblib

music_data = pd.read_csv(r"C:\Users\junha\PythonProjects\predict_music_preferences\music.csv")
# print(music_data)

X = music_data.drop(columns=['genre'])
y = music_data['genre']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(predictions)

score = accuracy_score(y_test, predictions)
print(score)

# joblib.dump(model, 'music-recommender.joblib')

# tree.export_graphviz(model, out_file='music-recommender.dot',
#                      feature_names=['age', 'gender'],
#                      class_names=sorted(y.unique()),
#                      label='all', rounded= True,
#                      filled=True)
normal
{ "blob_id": "8dbcd7bba09f8acff860890d8201e016b587796d", "index": 6149, "step-1": "<mask token>\n", "step-2": "<mask token>\nmodel.fit(X_train, y_train)\n<mask token>\nprint(predictions)\n<mask token>\nprint(score)\n", "step-3": "<mask token>\nmusic_data = pd.read_csv(\n 'C:\\\\Users\\\\junha\\\\PythonProjects\\\\predict_music_preferences\\\\music.csv')\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\nscore = accuracy_score(y_test, predictions)\nprint(score)\n", "step-4": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nmusic_data = pd.read_csv(\n 'C:\\\\Users\\\\junha\\\\PythonProjects\\\\predict_music_preferences\\\\music.csv')\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\nscore = accuracy_score(y_test, predictions)\nprint(score)\n", "step-5": "\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n# from sklearn import tree\n# import joblib\nmusic_data = pd.read_csv(r\"C:\\Users\\junha\\PythonProjects\\predict_music_preferences\\music.csv\")\n# print(music_data)\n\n\nX = music_data.drop(columns=['genre'])\ny = music_data['genre']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nprint(predictions)\n\nscore = accuracy_score(y_test, predictions)\nprint(score)\n\n# joblib.dump(model, 'music-recommender.joblib')\n\n# tree.export_graphviz(model, out_file='music-recommender.dot',\n# feature_names=['age', 'gender'],\n# class_names=sorted(y.unique()), \n# label='all', rounded= True,\n# filled=True)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-

from django.conf.urls import patterns, url

from customer_support.views import update_existing_subscriber, \
    add_new_subscriber

from .views import (EditSubscriberView,
                    DeActivateSubscriberView,
                    ReActivateSubscriberView,
                    SupportSubscriberReportView,
                    DashboardView)


urlpatterns = patterns('',
    url(
        regex=r'new_subscriber/$',
        view=add_new_subscriber,
        name="support.new_subscriber"
    ),
    url(
        regex=r'update_subscriber/(?P<pk>\d+)/$',
        view=update_existing_subscriber,
        name="support.update_subscriber"
    ),
    url(
        regex=r'edit_subscriber/$',
        view=EditSubscriberView.as_view(),
        name="support.edit_subscriber"
    ),
    url(
        regex=r'deactivate_subscriber/$',
        view=DeActivateSubscriberView.as_view(),
        name="support.deactivate_subscriber"
    ),
    url(
        regex=r'reactivate_subscriber/$',
        view=ReActivateSubscriberView.as_view(),
        name="support.reactivate_subscriber"
    ),
    url(
        regex=r'reports/$',
        view=SupportSubscriberReportView.as_view(),
        name="support.subscriber_report"
    ),
    url(
        regex=r'dashboard/$',
        view=DashboardView.as_view(),
        name="support.dashboard"
    ),
)
normal
{ "blob_id": "fb4818e742ed3c7d131c426811f839dbe70f03de", "index": 2650, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = patterns('', url(regex='new_subscriber/$', view=\n add_new_subscriber, name='support.new_subscriber'), url(regex=\n 'update_subscriber/(?P<pk>\\\\d+)/$', view=update_existing_subscriber,\n name='support.update_subscriber'), url(regex='edit_subscriber/$', view=\n EditSubscriberView.as_view(), name='support.edit_subscriber'), url(\n regex='deactivate_subscriber/$', view=DeActivateSubscriberView.as_view(\n ), name='support.deactivate_subscriber'), url(regex=\n 'reactivate_subscriber/$', view=ReActivateSubscriberView.as_view(),\n name='support.reactivate_subscriber'), url(regex='reports/$', view=\n SupportSubscriberReportView.as_view(), name='support.subscriber_report'\n ), url(regex='dashboard/$', view=DashboardView.as_view(), name=\n 'support.dashboard'))\n", "step-3": "from django.conf.urls import patterns, url\nfrom customer_support.views import update_existing_subscriber, add_new_subscriber\nfrom .views import EditSubscriberView, DeActivateSubscriberView, ReActivateSubscriberView, SupportSubscriberReportView, DashboardView\nurlpatterns = patterns('', url(regex='new_subscriber/$', view=\n add_new_subscriber, name='support.new_subscriber'), url(regex=\n 'update_subscriber/(?P<pk>\\\\d+)/$', view=update_existing_subscriber,\n name='support.update_subscriber'), url(regex='edit_subscriber/$', view=\n EditSubscriberView.as_view(), name='support.edit_subscriber'), url(\n regex='deactivate_subscriber/$', view=DeActivateSubscriberView.as_view(\n ), name='support.deactivate_subscriber'), url(regex=\n 'reactivate_subscriber/$', view=ReActivateSubscriberView.as_view(),\n name='support.reactivate_subscriber'), url(regex='reports/$', view=\n SupportSubscriberReportView.as_view(), name='support.subscriber_report'\n ), url(regex='dashboard/$', view=DashboardView.as_view(), name=\n 'support.dashboard'))\n", "step-4": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nfrom customer_support.views import update_existing_subscriber, \\\n add_new_subscriber\n\nfrom .views import (EditSubscriberView,\n DeActivateSubscriberView,\n ReActivateSubscriberView,\n SupportSubscriberReportView,\n DashboardView)\n\n\nurlpatterns = patterns('',\n url(\n regex=r'new_subscriber/$',\n view=add_new_subscriber,\n name=\"support.new_subscriber\"\n ),\n url(\n regex=r'update_subscriber/(?P<pk>\\d+)/$',\n view=update_existing_subscriber,\n name=\"support.update_subscriber\"\n ),\n url(\n regex=r'edit_subscriber/$',\n view=EditSubscriberView.as_view(),\n name=\"support.edit_subscriber\"\n ),\n url(\n regex=r'deactivate_subscriber/$',\n view=DeActivateSubscriberView.as_view(),\n name=\"support.deactivate_subscriber\"\n ),\n url(\n regex=r'reactivate_subscriber/$',\n view=ReActivateSubscriberView.as_view(),\n name=\"support.reactivate_subscriber\"\n ),\n url(\n regex=r'reports/$',\n view=SupportSubscriberReportView.as_view(),\n name=\"support.subscriber_report\"\n ),\n url(\n regex=r'dashboard/$',\n view=DashboardView.as_view(),\n name=\"support.dashboard\"\n ),\n)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)

ledPin = 4
pinOn = False

GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)


def print_pin_status(pin_number):
    GPIO.setup(pin_number, GPIO.IN)
    value = GPIO.input(pin_number)
    print(f'Current Value of {pin_number} is {value}')
    GPIO.setup(pin_number, GPIO.OUT)


while True:
    print_pin_status(ledPin)

    key = input("Action, press q to quit: ")

    print(key)

    if key == ' ':
        print("space pushed")

    if key == '1':

        if pinOn:
            print("turning led off")
            GPIO.output(ledPin, GPIO.LOW)
            pinOn = False
        else:
            print("turning led on")
            GPIO.output(ledPin, GPIO.HIGH)
            pinOn = True

    if key == 'q':
        print("Quiting. . .")
        break
normal
{ "blob_id": "492c416becc44deaafef519eae8c9a82ac00cc0e", "index": 8632, "step-1": "<mask token>\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\n<mask token>\n", "step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n", "step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nledPin = 4\npinOn = False\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n", "step-4": "import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nledPin = 4\npinOn = False\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n key = input('Action, press q to quit: ')\n print(key)\n if key == ' ':\n print('space pushed')\n if key == '1':\n if pinOn:\n print('turning led off')\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print('turning led on')\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n if key == 'q':\n print('Quiting. . .')\n break\n", "step-5": "#!/usr/bin/python\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nledPin = 4\npinOn = False\n\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.output(ledPin, GPIO.LOW)\n\n\ndef print_pin_status(pin_number):\n GPIO.setup(pin_number, GPIO.IN)\n value = GPIO.input(pin_number)\n print(f'Current Value of {pin_number} is {value}')\n GPIO.setup(pin_number, GPIO.OUT)\n\n\nwhile True:\n print_pin_status(ledPin)\n\n key = input(\"Action, press q to quit: \")\n\n print(key)\n\n if key == ' ':\n print(\"space pushed\")\n\n if key == '1':\n\n if pinOn:\n print(\"turning led off\")\n GPIO.output(ledPin, GPIO.LOW)\n pinOn = False\n else:\n print(\"turning led on\")\n GPIO.output(ledPin, GPIO.HIGH)\n pinOn = True\n\n if key == 'q':\n print(\"Quiting. . .\")\n break\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from nmigen import *


class Top(Elaboratable):
    def __init__(self):
        self.counter = Signal(3)
        self.led = Signal()

    def elaborate(self, platform):
        m = Module()
        m.d.comb += self.led.eq(self.counter[2])
        m.d.sync += self.counter.eq(self.counter + 1)
        return m
normal
{ "blob_id": "22b6ea64cdb109e1c6b2536b50935d09d37a7e1a", "index": 3057, "step-1": "<mask token>\n\n\nclass Top(Elaboratable):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Top(Elaboratable):\n <mask token>\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-3": "<mask token>\n\n\nclass Top(Elaboratable):\n\n def __init__(self):\n self.counter = Signal(3)\n self.led = Signal()\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-4": "from nmigen import *\n\n\nclass Top(Elaboratable):\n\n def __init__(self):\n self.counter = Signal(3)\n self.led = Signal()\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance

with open('input.txt', 'r') as f:
    data = f.read()

res = [i for i in data.splitlines()]
print(res)

newHold = []
for line in res:
    newHold.append((tuple(int(i) for i in line.split(', '))))
print(newHold)
mapper = np.zeros((400,400))

#plt.scatter(*zip(*newHold))
#plt.show()

for i, tup in enumerate(newHold):
    x = tup[0]
    y = tup[1]
    if mapper[y][x] == 0:
        mapper[y][x] = i

rows = mapper.shape[0]
cols = mapper.shape[1]

for num, top in enumerate(newHold):
    first = list(newHold[num])
    for i in range(0, rows):
        for j in range(0, cols):
            if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):
                mapper[i][j] = distance.cityblock(first, [i,j])
            elif mapper[i][j] == distance.cityblock(first, [i,j]):
                mapper[i][j] = -1000
    print(num)
    plt.imshow(mapper, cmap="viridis")
    plt.show()

plt.imshow(mapper, cmap="viridis")
plt.show()
normal
{ "blob_id": "47476fbb78ca8ce14d30bf226795bbd85b5bae45", "index": 6939, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\n<mask token>\nprint(res)\n<mask token>\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\n<mask token>\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n<mask token>\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n", "step-3": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n", "step-4": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n", "step-5": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\n\nwith open('input.txt', 'r') as f:\n data = f.read()\n\nres = [i for i in data.splitlines()]\nprint(res)\n\nnewHold = []\nfor line in res:\n newHold.append((tuple(int(i) for i in line.split(', '))))\nprint(newHold)\nmapper = np.zeros((400,400))\n\n#plt.scatter(*zip(*newHold))\n#plt.show()\n\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n\nrows = mapper.shape[0]\ncols = mapper.shape[1]\n\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):\n mapper[i][j] = distance.cityblock(first, [i,j])\n elif mapper[i][j] == 
distance.cityblock(first, [i,j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap=\"viridis\")\n plt.show()\n\nplt.imshow(mapper, cmap=\"viridis\")\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
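The nested loops above call distance.cityblock once per grid cell and per point, which is slow for a 400x400 grid. Below is a vectorized sketch of the same nearest-point idea using scipy's cdist; the function name and the -1 tie marker are illustrative choices, not part of the original snippet.

import numpy as np
from scipy.spatial.distance import cdist


def manhattan_owner(points, size=400):
    # For every cell of a size x size grid, return the index of the nearest
    # point under the cityblock metric; cells tied between points get -1.
    pts = np.asarray(points, dtype=float)
    rows, cols = np.mgrid[0:size, 0:size]
    cells = np.column_stack([rows.ravel(), cols.ravel()])
    d = cdist(cells, pts, metric='cityblock')
    owner = d.argmin(axis=1)
    tied = (d == d.min(axis=1, keepdims=True)).sum(axis=1) > 1
    owner[tied] = -1
    return owner.reshape(size, size)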
import tensorflow as tf


def data_rescale(x):
    return tf.subtract(tf.divide(x, 127.5), 1)


def inverse_rescale(y):
    return tf.round(tf.multiply(tf.add(y, 1), 127.5))
normal
{ "blob_id": "1a09b38838f40c4c6049da8e6a72ba3d56806c07", "index": 3703, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n", "step-3": "<mask token>\n\n\ndef data_rescale(x):\n return tf.subtract(tf.divide(x, 127.5), 1)\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n", "step-4": "import tensorflow as tf\n\n\ndef data_rescale(x):\n return tf.subtract(tf.divide(x, 127.5), 1)\n\n\ndef inverse_rescale(y):\n return tf.round(tf.multiply(tf.add(y, 1), 127.5))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
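A quick round-trip check for the two helpers above (a sketch assuming TensorFlow 2.x eager execution): mapping pixel values into [-1, 1] and back reproduces the 0-255 inputs, up to the rounding of 127.5.

import tensorflow as tf

x = tf.constant([0.0, 127.5, 255.0])
y = data_rescale(x)           # approximately [-1., 0., 1.]
x_back = inverse_rescale(y)   # [0., 128., 255.]
print(y.numpy(), x_back.numpy())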
'''
Created on 2018-9-8

@author: weij
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os.path
import sys
import time
import numpy as np

from numpy import shape
from scipy import linalg
from sklearn import datasets, linear_model, cross_validation, svm
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix

import com.freebirdweij.goldanalyse.ml.data_util as base
import matplotlib.pyplot as plt


def test_linearSVC(*data):
    X_train, X_test, y_train, y_test = data
    cls = svm.LinearSVC()
    cls.fit(X_train, y_train)
    print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scors:%.2f' % cls.score(X_test, y_test))


def test_SVC_linear(*data):
    X_train, X_test, y_train, y_test = data
    cls = svm.SVC(kernel='linear')
    cls.fit(X_train, y_train)
    print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scors:%.2f' % cls.score(X_test, y_test))


def test_SVC_poly(*data):
    X_train, X_test, y_train, y_test = data
    fig = plt.figure()
    ### test degree ###
    degrees = range(1, 2)
    train_scores = []
    test_scores = []
    for degree in degrees:
        cls = svm.SVC(kernel='poly', degree=degree)
        cls.fit(X_train, y_train)
        train_scores.append(cls.score(X_train, y_train))
        test_scores.append(cls.score(X_test, y_test))
        print('Scors:%.2f' % cls.score(X_test, y_test))
    ax = fig.add_subplot(1, 3, 1)
    ax.plot(degrees, train_scores, label="Training score ", marker='+')
    ax.plot(degrees, test_scores, label="Testing score ", marker='o')
    ax.set_title("SVC_poly_degree ")
    ax.set_xlabel("p")
    ax.set_ylabel("score")
    ax.set_ylim(0, 1.05)
    ax.legend(loc="best", framealpha=0.5)
    plt.show()


def test_SVC_rbf(*data):
    X_train, X_test, y_train, y_test = data
    fig = plt.figure()
    ### test degree ###
    #gammas = range(1,2)
    #train_scores=[]
    #test_scores=[]
    #for gamma in gammas:
    cls = svm.SVC(C=1e3, kernel='rbf', gamma=0.1, probability=True)
    cls.fit(X_train, y_train)
    #train_scores.append(cls.score(X_train, y_train))
    #test_scores.append(cls.score(X_test, y_test))
    print('Scors:%.2f' % cls.score(X_test, y_test))
    print('probability')
    print(cls.predict(X_test))
    return cls.predict_proba(X_test)

    #ax=fig.add_subplot(1,1,1)
    #ax.plot(gammas,train_scores,label="Training score ",marker='+')
    #ax.plot(gammas,test_scores,label="Testing score ",marker='o')
    #ax.set_title("SVC_rbf ")
    #ax.set_xlabel(r"$\gamma$")
    #ax.set_ylabel("score")
    #ax.set_ylim(0,1.05)
    #ax.legend(loc="best",framealpha=0.5)
    #plt.show()


def grid_SVC_rbf(*data):
    X_train, X_test, y_train, y_test = data
    fig = plt.figure()
    ### test degree ###
    param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
                  'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
    cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
    cls.fit(X_train, y_train)
    print('Best estimotor by GridSearchCV:')
    print(cls.best_estimator_)


def test_SVC_sigmod(*data):
    X_train, X_test, y_train, y_test = data
    fig = plt.figure()
    ### test degree ###
    gammas = range(1, 2)
    train_scores = []
    test_scores = []
    for gamma in gammas:
        cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)
        cls.fit(X_train, y_train)
        train_scores.append(cls.score(X_train, y_train))
        test_scores.append(cls.score(X_test, y_test))
        print('Scors:%.2f' % cls.score(X_test, y_test))
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(gammas, train_scores, label="Training score ", marker='+')
    ax.plot(gammas, test_scores, label="Testing score ", marker='o')
    ax.set_title("SVC_sigmoid_gamma ")
    ax.set_xscale("log")
    ax.set_xlabel(r"$\gamma$")
    ax.set_ylabel("score")
    ax.set_ylim(0, 1.05)
    ax.legend(loc="best", framealpha=0.5)
    plt.show()


def main():
    DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
    DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'

    train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.int16,
                                               features_dtype=np.float32, target_column=0)
    test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.int16,
                                              features_dtype=np.float32, target_column=0)

    test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target, test_datas.target)
    #pro_date = test_SVC_rbf(train_datas.data,test_datas.data,train_datas.target,test_datas.target)

    #dataMat = input_datas.data
    #print('dataMat:-----------------------')
    #print(dataMat)
    #pcaData = np.dot(dataMat,eig_vect)
    #reconMat = np.dot(pcaData,eig_vect.T)+mean_v  #Reconstructed datas.
    #print('k:-----------------------')
    #print(k)
    #print('pcaData:-----------------------')
    #print(pcaData)
    #print('reconMat:-----------------------')
    #print(reconMat)
    #base.write_a_dataset_to_a_csv('audt365-2018-2-21-day-class21-high100-round-test-svm.csv', pro_date)
    #base.write_a_dataset_to_a_csv('hjxh365-2018-4-16-day-plus-norm-clear-pca9999-recn.csv', reconMat)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.01,
        help='Initial learning rate.'
    )
    parser.add_argument(
        '--max_steps',
        type=int,
        default=100000,
        help='Number of steps to run trainer.'
    )
    parser.add_argument(
        '--percentage',
        type=float,
        default=0.99,
        help='Number of float for pca remain percentage.'
    )
    parser.add_argument(
        '--hidden2',
        type=int,
        default=32,
        help='Number of units in hidden layer 2.'
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=1,
        help='Batch size. Must divide evenly into the dataset sizes.'
    )
    parser.add_argument(
        '--input_data_dir',
        type=str,
        default='/home/freebirdweij/tf_works/invest',
        help='Directory to put the input data.'
    )
    parser.add_argument(
        '--log_dir',
        type=str,
        default='/home/freebirdweij/tf_works/invest/logs',
        help='Directory to put the log data.'
    )
    parser.add_argument(
        '--fake_data',
        default=False,
        help='If true, uses fake data for unit testing.',
        action='store_true'
    )
    FLAGS, unparsed = parser.parse_known_args()
    main()
normal
{ "blob_id": "49995e60b817e2c5a2ea7e85e4fe96ca95363cb2", "index": 2148, "step-1": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\n<mask token>\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = 
plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\n<mask token>\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\ndef test_SVC_sigmod(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n gammas = range(1, 2)\n train_scores = []\n test_scores = []\n for gamma in gammas:\n cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = 
fig.add_subplot(1, 1, 1)\n ax.plot(gammas, train_scores, label='Training score ', marker='+')\n ax.plot(gammas, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_sigmoid_gamma ')\n ax.set_xscale('log')\n ax.set_xlabel('$\\\\gamma$')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\ndef test_SVC_sigmod(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n gammas = range(1, 2)\n train_scores = []\n test_scores = []\n for gamma in gammas:\n cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(gammas, train_scores, label='Training score ', marker='+')\n ax.plot(gammas, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_sigmoid_gamma ')\n ax.set_xscale('log')\n ax.set_xlabel('$\\\\gamma$')\n ax.set_ylabel('score')\n 
ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--learning_rate', type=float, default=0.01, help=\n 'Initial learning rate.')\n parser.add_argument('--max_steps', type=int, default=100000, help=\n 'Number of steps to run trainer.')\n parser.add_argument('--percentage', type=float, default=0.99, help=\n 'Number of float for pca remain percentage.')\n parser.add_argument('--hidden2', type=int, default=32, help=\n 'Number of units in hidden layer 2.')\n parser.add_argument('--batch_size', type=int, default=1, help=\n 'Batch size. Must divide evenly into the dataset sizes.')\n parser.add_argument('--input_data_dir', type=str, default=\n '/home/freebirdweij/tf_works/invest', help=\n 'Directory to put the input data.')\n parser.add_argument('--log_dir', type=str, default=\n '/home/freebirdweij/tf_works/invest/logs', help=\n 'Directory to put the log data.')\n parser.add_argument('--fake_data', default=False, help=\n 'If true, uses fake data for unit testing.', action='store_true')\n FLAGS, unparsed = parser.parse_known_args()\n main()\n", "step-5": "'''\r\nCreated on 2018-9-8\r\n\r\n@author: weij\r\n'''\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport os.path\r\nimport sys\r\nimport time\r\nimport numpy as np\r\n\r\n\r\nfrom numpy import shape\r\nfrom scipy import linalg\r\nfrom sklearn import datasets,linear_model,cross_validation,svm\r\nfrom sklearn.grid_search import GridSearchCV\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport com.freebirdweij.goldanalyse.ml.data_util as base\r\nimport matplotlib.pyplot as plt\r\n\r\ndef test_linearSVC(*data):\r\n X_train,X_test,y_train,y_test = data\r\n cls = svm.LinearSVC()\r\n cls.fit(X_train, y_train)\r\n print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\ndef test_SVC_linear(*data):\r\n X_train,X_test,y_train,y_test = data\r\n cls = svm.SVC(kernel='linear')\r\n cls.fit(X_train, y_train)\r\n print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\ndef test_SVC_poly(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n degrees = range(1,2)\r\n train_scores=[]\r\n test_scores=[]\r\n for degree in degrees:\r\n cls = svm.SVC(kernel='poly',degree=degree)\r\n cls.fit(X_train, y_train)\r\n train_scores.append(cls.score(X_train, y_train))\r\n test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\n ax=fig.add_subplot(1,3,1)\r\n ax.plot(degrees,train_scores,label=\"Training score \",marker='+')\r\n ax.plot(degrees,test_scores,label=\"Testing score \",marker='o')\r\n ax.set_title(\"SVC_poly_degree \")\r\n ax.set_xlabel(\"p\")\r\n 
ax.set_ylabel(\"score\")\r\n ax.set_ylim(0,1.05)\r\n ax.legend(loc=\"best\",framealpha=0.5)\r\n plt.show()\r\n \r\ndef test_SVC_rbf(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n #gammas = range(1,2)\r\n #train_scores=[]\r\n #test_scores=[]\r\n #for gamma in gammas:\r\n cls = svm.SVC(C=1e3,kernel='rbf',gamma=0.1,probability=True)\r\n cls.fit(X_train, y_train)\r\n #train_scores.append(cls.score(X_train, y_train))\r\n #test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n print('probability')\r\n print(cls.predict(X_test))\r\n return cls.predict_proba(X_test)\r\n \r\n #ax=fig.add_subplot(1,1,1)\r\n #ax.plot(gammas,train_scores,label=\"Training score \",marker='+')\r\n #ax.plot(gammas,test_scores,label=\"Testing score \",marker='o')\r\n #ax.set_title(\"SVC_rbf \")\r\n #ax.set_xlabel(r\"$\\gamma$\")\r\n #ax.set_ylabel(\"score\")\r\n #ax.set_ylim(0,1.05)\r\n #ax.legend(loc=\"best\",framealpha=0.5)\r\n #plt.show()\r\n \r\ndef grid_SVC_rbf(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n param_grid = {'C':[1e3,5e3,1e4,5e4,1e5],\r\n 'gamma':[0.0001,0.0005,0.001,0.005,0.01,0.1]}\r\n cls = GridSearchCV(svm.SVC(kernel='rbf'),param_grid)\r\n cls.fit(X_train, y_train)\r\n print('Best estimotor by GridSearchCV:')\r\n print(cls.best_estimator_)\r\n \r\n \r\ndef test_SVC_sigmod(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n gammas = range(1,2)\r\n train_scores=[]\r\n test_scores=[]\r\n for gamma in gammas:\r\n cls = svm.SVC(kernel='sigmoid',gamma=gamma,coef0=0)\r\n cls.fit(X_train, y_train)\r\n train_scores.append(cls.score(X_train, y_train))\r\n test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\n ax=fig.add_subplot(1,1,1)\r\n ax.plot(gammas,train_scores,label=\"Training score \",marker='+')\r\n ax.plot(gammas,test_scores,label=\"Testing score \",marker='o')\r\n ax.set_title(\"SVC_sigmoid_gamma \")\r\n ax.set_xscale(\"log\")\r\n ax.set_xlabel(r\"$\\gamma$\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(0,1.05)\r\n ax.legend(loc=\"best\",framealpha=0.5)\r\n plt.show()\r\n \r\ndef main():\r\n \r\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\r\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\r\n\r\n train_datas = base.load_csv_without_header(DATA_TRAIN,target_dtype=np.int16,\r\n features_dtype=np.float32,target_column=0)\r\n test_datas = base.load_csv_without_header(DATA_TEST,target_dtype=np.int16,\r\n features_dtype=np.float32,target_column=0)\r\n \r\n test_SVC_sigmod(train_datas.data,test_datas.data,train_datas.target,test_datas.target)\r\n #pro_date = test_SVC_rbf(train_datas.data,test_datas.data,train_datas.target,test_datas.target)\r\n \r\n #dataMat = input_datas.data\r\n #print('dataMat:-----------------------')\r\n #print(dataMat)\r\n\r\n #pcaData = np.dot(dataMat,eig_vect)\r\n #reconMat = np.dot(pcaData,eig_vect.T)+mean_v #Reconstructed datas.\r\n #print('k:-----------------------')\r\n #print(k)\r\n #print('pcaData:-----------------------')\r\n #print(pcaData)\r\n #print('reconMat:-----------------------')\r\n #print(reconMat)\r\n #base.write_a_dataset_to_a_csv('audt365-2018-2-21-day-class21-high100-round-test-svm.csv', pro_date)\r\n #base.write_a_dataset_to_a_csv('hjxh365-2018-4-16-day-plus-norm-clear-pca9999-recn.csv', reconMat)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = 
argparse.ArgumentParser()\r\n parser.add_argument(\r\n '--learning_rate',\r\n type=float,\r\n default=0.01,\r\n help='Initial learning rate.'\r\n )\r\n parser.add_argument(\r\n '--max_steps',\r\n type=int,\r\n default=100000,\r\n help='Number of steps to run trainer.'\r\n )\r\n parser.add_argument(\r\n '--percentage',\r\n type=float,\r\n default=0.99,\r\n help='Number of float for pca remain percentage.'\r\n )\r\n parser.add_argument(\r\n '--hidden2',\r\n type=int,\r\n default=32,\r\n help='Number of units in hidden layer 2.'\r\n )\r\n parser.add_argument(\r\n '--batch_size',\r\n type=int,\r\n default=1,\r\n help='Batch size. Must divide evenly into the dataset sizes.'\r\n )\r\n parser.add_argument(\r\n '--input_data_dir',\r\n type=str,\r\n default='/home/freebirdweij/tf_works/invest',\r\n help='Directory to put the input data.'\r\n )\r\n parser.add_argument(\r\n '--log_dir',\r\n type=str,\r\n default='/home/freebirdweij/tf_works/invest/logs',\r\n help='Directory to put the log data.'\r\n )\r\n parser.add_argument(\r\n '--fake_data',\r\n default=False,\r\n help='If true, uses fake data for unit testing.',\r\n action='store_true'\r\n )\r\n\r\n FLAGS, unparsed = parser.parse_known_args()\r\n main()\r\n", "step-ids": [ 5, 6, 7, 8, 10 ] }
[ 5, 6, 7, 8, 10 ]
# Generated by Django 3.2.7 on 2021-10-01 08:36

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0005_alter_users_is_active'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='users',
            managers=[
            ],
        ),
    ]
normal
{ "blob_id": "6670295241516664e30c7db5cd3b5e2fb6c4fb05", "index": 1985, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0005_alter_users_is_active')]\n operations = [migrations.AlterModelManagers(name='users', managers=[])]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0005_alter_users_is_active')]\n operations = [migrations.AlterModelManagers(name='users', managers=[])]\n", "step-5": "# Generated by Django 3.2.7 on 2021-10-01 08:36\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0005_alter_users_is_active'),\n ]\n\n operations = [\n migrations.AlterModelManagers(\n name='users',\n managers=[\n ],\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
old_file = open("new.csv", "r") new_file = open("new1,csv", "w") for line in old_file.readlines(): cleaned_line =line.replace(',','.') new_file.write(cleaned_line) old_file.close new_file.close
normal
{ "blob_id": "b3d26d01d45c073192d06c8e94c06f7eae267b14", "index": 968, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n", "step-3": "old_file = open('new.csv', 'r')\nnew_file = open('new1,csv', 'w')\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n", "step-4": "old_file = open(\"new.csv\", \"r\")\nnew_file = open(\"new1,csv\", \"w\")\nfor line in old_file.readlines():\n cleaned_line =line.replace(',','.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python

import sys

def solve():
    numEngines = int(sys.stdin.readline())
    engines = []
    for _ in range(numEngines):
        engine = sys.stdin.readline()
        engines.append(engine)

    numQueries = int(sys.stdin.readline())
    queries = []
    for _ in range(numQueries):
        query = sys.stdin.readline()
        queries.append(query)

    remainingEngines = set(engines)
    switches = 0
    for query in queries:
        remainingEngines.discard(query)
        if not remainingEngines:
            remainingEngines = set(engines)
            remainingEngines.discard(query)
            switches += 1

    return switches

cases = int(sys.stdin.readline())
for case in range(cases):
    print 'Case #%d: %s' % (case + 1, solve())
normal
{ "blob_id": "174f5b04f02ec0c9651d5e34c8b04df8bfd4dff4", "index": 1943, "step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef solve():\n\tnumEngines = int(sys.stdin.readline())\n\tengines = []\n\tfor _ in range(numEngines):\n\t\tengine = sys.stdin.readline()\n\t\tengines.append(engine)\n\n\tnumQueries = int(sys.stdin.readline())\n\tqueries = []\n\tfor _ in range(numQueries):\n\t\tquery = sys.stdin.readline()\n\t\tqueries.append(query)\n\n\tremainingEngines = set(engines)\n\tswitches = 0\n\tfor query in queries:\n\t\tremainingEngines.discard(query)\n\t\tif not remainingEngines:\n\t\t\tremainingEngines = set(engines)\n\t\t\tremainingEngines.discard(query)\n\t\t\tswitches += 1\n\n\treturn switches\n\ncases = int(sys.stdin.readline())\nfor case in range(cases):\n\tprint 'Case #%d: %s' % (case + 1, solve())\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from oscar.app import Shop
from apps.catalogue.app import application as catalogue_app


class BaseApplication(Shop):
    catalogue_app = catalogue_app


application = BaseApplication()
normal
{ "blob_id": "c8bb6ead7e305f466e24b47811d6ed38c8cfec0a", "index": 2691, "step-1": "<mask token>\n\n\nclass BaseApplication(Shop):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n", "step-4": "from oscar.app import Shop\nfrom apps.catalogue.app import application as catalogue_app\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import dash_html_components as html
import dash_core_components as dcc
import dash_daq as daq
import dash_bootstrap_components as dbc

import src.common.common_layout as layout_common


def build_navbar():
    return html.Div(
        id="banner",
        children=[
            html.Div(
                id="banner-text",
                className="banner",
                children=[
                    dbc.Row(
                        [
                            dbc.Col(html.Div(html.H2("CBPM real-time display")), width=11),
                            dbc.Col(
                                html.Div(
                                    id="banner-logo",
                                    children=[
                                        html.Button(
                                            id="learn-more-button", children="INFORMATION", n_clicks=0
                                        ),
                                    ],
                                ),
                            )
                        ],
                    ),
                ],
            ),
            html.Div(
                className="banner2",
                children=[
                    dbc.Row(
                        [
                            dbc.Col(
                                html.Div(
                                    daq.PowerButton(
                                        id='live_update_switch',
                                        on='True',
                                        size=50,
                                        color='#079407',
                                        # label='Label',
                                        # labelPosition='top'
                                    ),
                                    id='test_button',
                                    style={'padding': '10px 0px 0px 0px'},
                                ), width={"size": 1},
                            ),
                            dbc.Col(
                                html.Div(
                                    children=[
                                        html.H2("Live update is:"),
                                        html.H2(
                                            id='live_update_running',
                                            style={'margin-left': '1.0%', 'color': '#079407', 'font-weight': 'bold'},
                                        ),
                                        html.H2(
                                            id='live_update_paused',
                                            style={'margin-left': '0.5%', 'color': '#e0392a', 'font-weight': 'bold'},
                                        ),
                                    ],
                                ), #style={'padding': '0px 1000px 0px 0px'},
                            ),
                            dbc.Col(
                                html.Div(id='offline_store_df', style={'display': 'none'}),
                            ),
                            dbc.Col(
                                layout_common.dropdown_menu(), width=2,
                            )
                        ], no_gutters=True, justify='start',
                    )
                ]
            )
        ],
    )

def generate_modal():
    return html.Div(
        id="markdown",
        className="modal",
        children=(
            html.Div(
                id="markdown-container",
                className="markdown-container",
                children=[
                    html.Div(
                        className="close-container",
                        children=html.Button(
                            "Close",
                            id="markdown_close",
                            n_clicks=0,
                            className="closeButton",
                        ),
                    ),
                    html.Div(
                        className="markdown-text",
                        children=dcc.Markdown(
                            children=(
                                """
                        ###### What is this mock app about?
                        This is a dashboard for monitoring real-time process quality along manufacture production line.
                        ###### What does this app shows
                        Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.
                        The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.
                        The trend is updated every other second to simulate real-time measurements. Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will
                        trigger alerts instantly for a detailed checkup.

                        Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.
                    """
                            )
                        ),
                    ),
                ],
            )
        ),
    )
normal
{ "blob_id": "f9dd20a3b72c0c8e72029459244486f31eaff536", "index": 9411, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef generate_modal():\n return html.Div(id='markdown', className='modal', children=html.Div(id=\n 'markdown-container', className='markdown-container', children=[\n html.Div(className='close-container', children=html.Button('Close',\n id='markdown_close', n_clicks=0, className='closeButton')), html.\n Div(className='markdown-text', children=dcc.Markdown(children=\n \"\"\"\n ###### What is this mock app about?\n This is a dashboard for monitoring real-time process quality along manufacture production line.\n ###### What does this app shows\n Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.\n The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.\n The trend is updated every other second to simulate real-time measurements. Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will\n trigger alerts instantly for a detailed checkup.\n\n Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.\n \"\"\"\n ))]))\n", "step-3": "<mask token>\n\n\ndef build_navbar():\n return html.Div(id='banner', children=[html.Div(id='banner-text',\n className='banner', children=[dbc.Row([dbc.Col(html.Div(html.H2(\n 'CBPM real-time display')), width=11), dbc.Col(html.Div(id=\n 'banner-logo', children=[html.Button(id='learn-more-button',\n children='INFORMATION', n_clicks=0)]))])]), html.Div(className=\n 'banner2', children=[dbc.Row([dbc.Col(html.Div(daq.PowerButton(id=\n 'live_update_switch', on='True', size=50, color='#079407'), id=\n 'test_button', style={'padding': '10px 0px 0px 0px'}), width={\n 'size': 1}), dbc.Col(html.Div(children=[html.H2('Live update is:'),\n html.H2(id='live_update_running', style={'margin-left': '1.0%',\n 'color': '#079407', 'font-weight': 'bold'}), html.H2(id=\n 'live_update_paused', style={'margin-left': '0.5%', 'color':\n '#e0392a', 'font-weight': 'bold'})])), dbc.Col(html.Div(id=\n 'offline_store_df', style={'display': 'none'})), dbc.Col(\n layout_common.dropdown_menu(), width=2)], no_gutters=True, justify=\n 'start')])])\n\n\ndef generate_modal():\n return html.Div(id='markdown', className='modal', children=html.Div(id=\n 'markdown-container', className='markdown-container', children=[\n html.Div(className='close-container', children=html.Button('Close',\n id='markdown_close', n_clicks=0, className='closeButton')), html.\n Div(className='markdown-text', children=dcc.Markdown(children=\n \"\"\"\n ###### What is this mock app about?\n This is a dashboard for monitoring real-time process quality along manufacture production line.\n ###### What does this app shows\n Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.\n The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.\n The trend is updated every other second to simulate real-time measurements. 
Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will\n trigger alerts instantly for a detailed checkup.\n\n Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.\n \"\"\"\n ))]))\n", "step-4": "import dash_html_components as html\nimport dash_core_components as dcc\nimport dash_daq as daq\nimport dash_bootstrap_components as dbc\nimport src.common.common_layout as layout_common\n\n\ndef build_navbar():\n return html.Div(id='banner', children=[html.Div(id='banner-text',\n className='banner', children=[dbc.Row([dbc.Col(html.Div(html.H2(\n 'CBPM real-time display')), width=11), dbc.Col(html.Div(id=\n 'banner-logo', children=[html.Button(id='learn-more-button',\n children='INFORMATION', n_clicks=0)]))])]), html.Div(className=\n 'banner2', children=[dbc.Row([dbc.Col(html.Div(daq.PowerButton(id=\n 'live_update_switch', on='True', size=50, color='#079407'), id=\n 'test_button', style={'padding': '10px 0px 0px 0px'}), width={\n 'size': 1}), dbc.Col(html.Div(children=[html.H2('Live update is:'),\n html.H2(id='live_update_running', style={'margin-left': '1.0%',\n 'color': '#079407', 'font-weight': 'bold'}), html.H2(id=\n 'live_update_paused', style={'margin-left': '0.5%', 'color':\n '#e0392a', 'font-weight': 'bold'})])), dbc.Col(html.Div(id=\n 'offline_store_df', style={'display': 'none'})), dbc.Col(\n layout_common.dropdown_menu(), width=2)], no_gutters=True, justify=\n 'start')])])\n\n\ndef generate_modal():\n return html.Div(id='markdown', className='modal', children=html.Div(id=\n 'markdown-container', className='markdown-container', children=[\n html.Div(className='close-container', children=html.Button('Close',\n id='markdown_close', n_clicks=0, className='closeButton')), html.\n Div(className='markdown-text', children=dcc.Markdown(children=\n \"\"\"\n ###### What is this mock app about?\n This is a dashboard for monitoring real-time process quality along manufacture production line.\n ###### What does this app shows\n Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.\n The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.\n The trend is updated every other second to simulate real-time measurements. 
Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will\n trigger alerts instantly for a detailed checkup.\n\n Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.\n \"\"\"\n ))]))\n", "step-5": "import dash_html_components as html\nimport dash_core_components as dcc\nimport dash_daq as daq\nimport dash_bootstrap_components as dbc\n\nimport src.common.common_layout as layout_common\n\n\ndef build_navbar():\n return html.Div(\n id=\"banner\",\n children=[\n html.Div(\n id=\"banner-text\",\n className=\"banner\",\n children=[\n dbc.Row(\n [\n dbc.Col(html.Div(html.H2(\"CBPM real-time display\")), width=11),\n dbc.Col(\n html.Div(\n id=\"banner-logo\",\n children=[\n html.Button(\n id=\"learn-more-button\", children=\"INFORMATION\", n_clicks=0\n ),\n ],\n ),\n )\n ],\n ),\n ],\n ),\n html.Div(\n className=\"banner2\",\n children=[\n dbc.Row(\n [\n dbc.Col(\n html.Div(\n daq.PowerButton(\n id='live_update_switch',\n on='True',\n size=50,\n color='#079407',\n # label='Label',\n # labelPosition='top'\n ),\n id='test_button',\n style={'padding': '10px 0px 0px 0px'},\n ), width={\"size\": 1},\n ),\n dbc.Col(\n html.Div(\n children=[\n html.H2(\"Live update is:\"),\n html.H2(\n id='live_update_running',\n style={'margin-left': '1.0%', 'color': '#079407', 'font-weight': 'bold'},\n ),\n html.H2(\n id='live_update_paused',\n style={'margin-left': '0.5%', 'color': '#e0392a', 'font-weight': 'bold'},\n ),\n ],\n ), #style={'padding': '0px 1000px 0px 0px'},\n ),\n dbc.Col(\n html.Div(id='offline_store_df', style={'display': 'none'}),\n ),\n dbc.Col(\n layout_common.dropdown_menu(), width=2,\n )\n ], no_gutters=True, justify='start',\n )\n ]\n )\n ],\n )\n\ndef generate_modal():\n return html.Div(\n id=\"markdown\",\n className=\"modal\",\n children=(\n html.Div(\n id=\"markdown-container\",\n className=\"markdown-container\",\n children=[\n html.Div(\n className=\"close-container\",\n children=html.Button(\n \"Close\",\n id=\"markdown_close\",\n n_clicks=0,\n className=\"closeButton\",\n ),\n ),\n html.Div(\n className=\"markdown-text\",\n children=dcc.Markdown(\n children=(\n \"\"\"\n ###### What is this mock app about?\n This is a dashboard for monitoring real-time process quality along manufacture production line.\n ###### What does this app shows\n Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.\n The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.\n The trend is updated every other second to simulate real-time measurements. Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will\n trigger alerts instantly for a detailed checkup.\n\n Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.\n \"\"\"\n )\n ),\n ),\n ],\n )\n ),\n )", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Sponge_sy"
# Date: 2021/9/11


import numpy
from tqdm import tqdm
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.snippets import sequence_padding, DataGenerator
from utils import *


class data_generator(DataGenerator):
    """Data Generator"""

    def __init__(self, pattern="", is_pre=True, *args, **kwargs):
        super(data_generator, self).__init__(*args, **kwargs)
        self.pattern = pattern
        self.is_pre = is_pre

    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []
        for is_end, text in self.sample(random):
            if (self.is_pre):
                token_ids, segment_ids = tokenizer.encode(first_text=self.pattern, second_text=text, maxlen=maxlen)
            else:
                token_ids, segment_ids = tokenizer.encode(first_text=text, second_text=self.pattern, maxlen=maxlen)
            source_ids, target_ids = token_ids[:], token_ids[:]
            batch_token_ids.append(source_ids)
            batch_segment_ids.append(segment_ids)

            if len(batch_token_ids) == self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                yield [batch_token_ids, batch_segment_ids], None
                batch_token_ids, batch_segment_ids, = [], []

def predict(data_generator_list, data):
    print("\n*******************Start to Zero-Shot predict*******************", flush=True)
    patterns_logits = [[] for _ in patterns]
    samples_logits = [[] for _ in data]
    for i in range(len(data_generator_list)):
        print("\nPattern{}".format(i), flush=True)
        data_generator = data_generator_list[i]
        counter = 0
        for (x, _) in tqdm(data_generator):
            outputs = model.predict(x[:2])
            for out in outputs:
                logit_pos = out[0].T
                patterns_logits[i].append(logit_pos)
                samples_logits[counter].append(logit_pos)
            counter += 1
    preds = []
    for i in range(len(patterns_logits[0])):
        pred = numpy.argmax([logits[i] for logits in patterns_logits])
        preds.append(int(pred))
    return preds, samples_logits

if __name__ == "__main__":

    # Load the hyper-parameters-----------------------------------------------------------
    maxlen = 128  # The max length 128 is used in our paper
    batch_size = 40  # Will not influence the results

    # Choose a model----------------------------------------------------------------------
    # Recommend to use 'uer-mixed-bert-base'
    # model_names = ['google-bert', 'google-bert-small', 'google-bert-zh',
    #                'hfl-bert-wwm', 'hfl-bert-wwm-ext',
    #                'uer-mixed-bert-tiny', 'uer-mixed-bert-small',
    #                'uer-mixed-bert-base', 'uer-mixed-bert-large']
    model_name = 'uer-mixed-bert-base'

    # Choose a dataset----------------------------------------------------------------------
    # dataset_names = ['eprstmt', 'tnews', 'csldcp', 'iflytek']
    # dataset_name = 'eprstmt'

    # Load model and dataset class
    bert_model = Model(model_name=model_name)

    # Create a template --------------------------------------------------------------------
    label_names = ['entertainment', 'sports', 'music', 'games', 'economics', 'education']
    patterns = ["This is {} news".format(label) for label in label_names]

    # Prefix or Suffix-------------------------------------------------------------------
    is_pre = True

    # Load the demo set--------------------------------------------------------------------

    demo_data_en = ['FIFA unveils biennial World Cup plan, UEFA threatens boycott',
                    'COVID vaccines hold up against severe Delta: US data',
                    'Justin Drew Bieber was born on March 1, 1994 at St. ',
                    'Horizon launches latest chip to take on global rivals',
                    'Twitch video gamers rise up to stop ‘hate raids’']

    demo_data = demo_data_en
    demo_generator_list = []
    for p in patterns:
        demo_generator_list.append(data_generator(pattern=p, is_pre=is_pre, data=demo_data, batch_size=batch_size))

    # Build BERT model---------------------------------------------------------------------
    tokenizer = Tokenizer('.' + bert_model.dict_path, do_lower_case=True)
    # Load BERET model with NSP head
    model = build_transformer_model(
        config_path='.' + bert_model.config_path, checkpoint_path='.' + bert_model.checkpoint_path, with_nsp=True,
    )

    # Zero-Shot predict and evaluate-------------------------------------------------------
    preds, samples_logits = predict(demo_generator_list, demo_data)
    for i, (p, d) in enumerate(zip(preds, demo_data)):
        pred_label = label_names[p]
        print("Sample {}:".format(i))
        print("Original Text: {}".format(d))
        print("Predict label: {}".format(pred_label))
        print("Logits: {}".format(samples_logits[i]))
        print()
normal
{ "blob_id": "5cb390b06026bc0899c0b10dc93f3ec1f2ffefa6", "index": 9727, "step-1": "<mask token>\n\n\nclass data_generator(DataGenerator):\n <mask token>\n\n def __init__(self, pattern='', is_pre=True, *args, **kwargs):\n super(data_generator, self).__init__(*args, **kwargs)\n self.pattern = pattern\n self.is_pre = is_pre\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []\n for is_end, text in self.sample(random):\n if self.is_pre:\n token_ids, segment_ids = tokenizer.encode(first_text=self.\n pattern, second_text=text, maxlen=maxlen)\n else:\n token_ids, segment_ids = tokenizer.encode(first_text=text,\n second_text=self.pattern, maxlen=maxlen)\n source_ids, target_ids = token_ids[:], token_ids[:]\n batch_token_ids.append(source_ids)\n batch_segment_ids.append(segment_ids)\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n yield [batch_token_ids, batch_segment_ids], None\n batch_token_ids, batch_segment_ids = [], []\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass data_generator(DataGenerator):\n \"\"\"Data Generator\"\"\"\n\n def __init__(self, pattern='', is_pre=True, *args, **kwargs):\n super(data_generator, self).__init__(*args, **kwargs)\n self.pattern = pattern\n self.is_pre = is_pre\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []\n for is_end, text in self.sample(random):\n if self.is_pre:\n token_ids, segment_ids = tokenizer.encode(first_text=self.\n pattern, second_text=text, maxlen=maxlen)\n else:\n token_ids, segment_ids = tokenizer.encode(first_text=text,\n second_text=self.pattern, maxlen=maxlen)\n source_ids, target_ids = token_ids[:], token_ids[:]\n batch_token_ids.append(source_ids)\n batch_segment_ids.append(segment_ids)\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n yield [batch_token_ids, batch_segment_ids], None\n batch_token_ids, batch_segment_ids = [], []\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass data_generator(DataGenerator):\n \"\"\"Data Generator\"\"\"\n\n def __init__(self, pattern='', is_pre=True, *args, **kwargs):\n super(data_generator, self).__init__(*args, **kwargs)\n self.pattern = pattern\n self.is_pre = is_pre\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []\n for is_end, text in self.sample(random):\n if self.is_pre:\n token_ids, segment_ids = tokenizer.encode(first_text=self.\n pattern, second_text=text, maxlen=maxlen)\n else:\n token_ids, segment_ids = tokenizer.encode(first_text=text,\n second_text=self.pattern, maxlen=maxlen)\n source_ids, target_ids = token_ids[:], token_ids[:]\n batch_token_ids.append(source_ids)\n batch_segment_ids.append(segment_ids)\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n yield [batch_token_ids, batch_segment_ids], None\n batch_token_ids, batch_segment_ids = [], []\n\n\ndef predict(data_generator_list, data):\n print('\\n*******************Start to Zero-Shot predict*******************',\n flush=True)\n patterns_logits = [[] for _ in patterns]\n samples_logits = [[] for _ in data]\n for i in range(len(data_generator_list)):\n print('\\nPattern{}'.format(i), flush=True)\n 
data_generator = data_generator_list[i]\n counter = 0\n for x, _ in tqdm(data_generator):\n outputs = model.predict(x[:2])\n for out in outputs:\n logit_pos = out[0].T\n patterns_logits[i].append(logit_pos)\n samples_logits[counter].append(logit_pos)\n counter += 1\n preds = []\n for i in range(len(patterns_logits[0])):\n pred = numpy.argmax([logits[i] for logits in patterns_logits])\n preds.append(int(pred))\n return preds, samples_logits\n\n\nif __name__ == '__main__':\n maxlen = 128\n batch_size = 40\n model_name = 'uer-mixed-bert-base'\n bert_model = Model(model_name=model_name)\n label_names = ['entertainment', 'sports', 'music', 'games', 'economics',\n 'education']\n patterns = ['This is {} news'.format(label) for label in label_names]\n is_pre = True\n demo_data_en = [\n 'FIFA unveils biennial World Cup plan, UEFA threatens boycott',\n 'COVID vaccines hold up against severe Delta: US data',\n 'Justin Drew Bieber was born on March 1, 1994 at St. ',\n 'Horizon launches latest chip to take on global rivals',\n 'Twitch video gamers rise up to stop ‘hate raids’']\n demo_data = demo_data_en\n demo_generator_list = []\n for p in patterns:\n demo_generator_list.append(data_generator(pattern=p, is_pre=is_pre,\n data=demo_data, batch_size=batch_size))\n tokenizer = Tokenizer('.' + bert_model.dict_path, do_lower_case=True)\n model = build_transformer_model(config_path='.' + bert_model.\n config_path, checkpoint_path='.' + bert_model.checkpoint_path,\n with_nsp=True)\n preds, samples_logits = predict(demo_generator_list, demo_data)\n for i, (p, d) in enumerate(zip(preds, demo_data)):\n pred_label = label_names[p]\n print('Sample {}:'.format(i))\n print('Original Text: {}'.format(d))\n print('Predict label: {}'.format(pred_label))\n print('Logits: {}'.format(samples_logits[i]))\n print()\n", "step-4": "import numpy\nfrom tqdm import tqdm\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.snippets import sequence_padding, DataGenerator\nfrom utils import *\n\n\nclass data_generator(DataGenerator):\n \"\"\"Data Generator\"\"\"\n\n def __init__(self, pattern='', is_pre=True, *args, **kwargs):\n super(data_generator, self).__init__(*args, **kwargs)\n self.pattern = pattern\n self.is_pre = is_pre\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []\n for is_end, text in self.sample(random):\n if self.is_pre:\n token_ids, segment_ids = tokenizer.encode(first_text=self.\n pattern, second_text=text, maxlen=maxlen)\n else:\n token_ids, segment_ids = tokenizer.encode(first_text=text,\n second_text=self.pattern, maxlen=maxlen)\n source_ids, target_ids = token_ids[:], token_ids[:]\n batch_token_ids.append(source_ids)\n batch_segment_ids.append(segment_ids)\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n yield [batch_token_ids, batch_segment_ids], None\n batch_token_ids, batch_segment_ids = [], []\n\n\ndef predict(data_generator_list, data):\n print('\\n*******************Start to Zero-Shot predict*******************',\n flush=True)\n patterns_logits = [[] for _ in patterns]\n samples_logits = [[] for _ in data]\n for i in range(len(data_generator_list)):\n print('\\nPattern{}'.format(i), flush=True)\n data_generator = data_generator_list[i]\n counter = 0\n for x, _ in tqdm(data_generator):\n outputs = model.predict(x[:2])\n for out in outputs:\n logit_pos = out[0].T\n 
patterns_logits[i].append(logit_pos)\n samples_logits[counter].append(logit_pos)\n counter += 1\n preds = []\n for i in range(len(patterns_logits[0])):\n pred = numpy.argmax([logits[i] for logits in patterns_logits])\n preds.append(int(pred))\n return preds, samples_logits\n\n\nif __name__ == '__main__':\n maxlen = 128\n batch_size = 40\n model_name = 'uer-mixed-bert-base'\n bert_model = Model(model_name=model_name)\n label_names = ['entertainment', 'sports', 'music', 'games', 'economics',\n 'education']\n patterns = ['This is {} news'.format(label) for label in label_names]\n is_pre = True\n demo_data_en = [\n 'FIFA unveils biennial World Cup plan, UEFA threatens boycott',\n 'COVID vaccines hold up against severe Delta: US data',\n 'Justin Drew Bieber was born on March 1, 1994 at St. ',\n 'Horizon launches latest chip to take on global rivals',\n 'Twitch video gamers rise up to stop ‘hate raids’']\n demo_data = demo_data_en\n demo_generator_list = []\n for p in patterns:\n demo_generator_list.append(data_generator(pattern=p, is_pre=is_pre,\n data=demo_data, batch_size=batch_size))\n tokenizer = Tokenizer('.' + bert_model.dict_path, do_lower_case=True)\n model = build_transformer_model(config_path='.' + bert_model.\n config_path, checkpoint_path='.' + bert_model.checkpoint_path,\n with_nsp=True)\n preds, samples_logits = predict(demo_generator_list, demo_data)\n for i, (p, d) in enumerate(zip(preds, demo_data)):\n pred_label = label_names[p]\n print('Sample {}:'.format(i))\n print('Original Text: {}'.format(d))\n print('Predict label: {}'.format(pred_label))\n print('Logits: {}'.format(samples_logits[i]))\n print()\n", "step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Sponge_sy\"\n# Date: 2021/9/11\n\n\nimport numpy\nfrom tqdm import tqdm\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.snippets import sequence_padding, DataGenerator\nfrom utils import *\n\n\nclass data_generator(DataGenerator):\n \"\"\"Data Generator\"\"\"\n\n def __init__(self, pattern=\"\", is_pre=True, *args, **kwargs):\n super(data_generator, self).__init__(*args, **kwargs)\n self.pattern = pattern\n self.is_pre = is_pre\n\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_output_ids = [], [], []\n for is_end, text in self.sample(random):\n if (self.is_pre):\n token_ids, segment_ids = tokenizer.encode(first_text=self.pattern, second_text=text, maxlen=maxlen)\n else:\n token_ids, segment_ids = tokenizer.encode(first_text=text, second_text=self.pattern, maxlen=maxlen)\n source_ids, target_ids = token_ids[:], token_ids[:]\n batch_token_ids.append(source_ids)\n batch_segment_ids.append(segment_ids)\n\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n yield [batch_token_ids, batch_segment_ids], None\n batch_token_ids, batch_segment_ids, = [], []\n\ndef predict(data_generator_list, data):\n print(\"\\n*******************Start to Zero-Shot predict*******************\", flush=True)\n patterns_logits = [[] for _ in patterns]\n samples_logits = [[] for _ in data]\n for i in range(len(data_generator_list)):\n print(\"\\nPattern{}\".format(i), flush=True)\n data_generator = data_generator_list[i]\n counter = 0\n for (x, _) in tqdm(data_generator):\n outputs = model.predict(x[:2])\n for out in outputs:\n logit_pos = out[0].T\n patterns_logits[i].append(logit_pos)\n 
samples_logits[counter].append(logit_pos)\n counter += 1\n preds = []\n for i in range(len(patterns_logits[0])):\n pred = numpy.argmax([logits[i] for logits in patterns_logits])\n preds.append(int(pred))\n return preds, samples_logits\n\nif __name__ == \"__main__\":\n\n # Load the hyper-parameters-----------------------------------------------------------\n maxlen = 128 # The max length 128 is used in our paper\n batch_size = 40 # Will not influence the results\n\n # Choose a model----------------------------------------------------------------------\n # Recommend to use 'uer-mixed-bert-base'\n # model_names = ['google-bert', 'google-bert-small', 'google-bert-zh',\n # 'hfl-bert-wwm', 'hfl-bert-wwm-ext',\n # 'uer-mixed-bert-tiny', 'uer-mixed-bert-small',\n # 'uer-mixed-bert-base', 'uer-mixed-bert-large']\n model_name = 'uer-mixed-bert-base'\n\n # Choose a dataset----------------------------------------------------------------------\n # dataset_names = ['eprstmt', 'tnews', 'csldcp', 'iflytek']\n # dataset_name = 'eprstmt'\n\n # Load model and dataset class\n bert_model = Model(model_name=model_name)\n\n # Create a template --------------------------------------------------------------------\n label_names = ['entertainment', 'sports', 'music', 'games', 'economics', 'education']\n patterns = [\"This is {} news\".format(label) for label in label_names]\n\n # Prefix or Suffix-------------------------------------------------------------------\n is_pre = True\n\n # Load the demo set--------------------------------------------------------------------\n\n demo_data_en = ['FIFA unveils biennial World Cup plan, UEFA threatens boycott',\n 'COVID vaccines hold up against severe Delta: US data',\n 'Justin Drew Bieber was born on March 1, 1994 at St. ',\n 'Horizon launches latest chip to take on global rivals',\n 'Twitch video gamers rise up to stop ‘hate raids’']\n\n demo_data = demo_data_en\n demo_generator_list = []\n for p in patterns:\n demo_generator_list.append(data_generator(pattern=p, is_pre=is_pre, data=demo_data, batch_size=batch_size))\n\n # Build BERT model---------------------------------------------------------------------\n tokenizer = Tokenizer('.' + bert_model.dict_path, do_lower_case=True)\n # Load BERET model with NSP head\n model = build_transformer_model(\n config_path='.' + bert_model.config_path, checkpoint_path='.' + bert_model.checkpoint_path, with_nsp=True,\n )\n\n # Zero-Shot predict and evaluate-------------------------------------------------------\n preds, samples_logits = predict(demo_generator_list, demo_data)\n for i, (p, d) in enumerate(zip(preds, demo_data)):\n pred_label = label_names[p]\n print(\"Sample {}:\".format(i))\n print(\"Original Text: {}\".format(d))\n print(\"Predict label: {}\".format(pred_label))\n print(\"Logits: {}\".format(samples_logits[i]))\n print()\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
[ 3, 4, 6, 7, 8 ]
import _thread
import os
from queue import Queue
from threading import Thread
import random
import io

import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient

import config

import matplotlib

matplotlib.use('Agg')

print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')

print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')

remove_words = ['год']
DIR = os.path.dirname(__file__)
processing = []
current_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year


def cloud(user_id):
    wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
    wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))

    tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
    morph = pymorphy2.MorphAnalyzer()

    def transform(sentence):
        return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),
                   filter(
                       lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,
                       tokenizer.tokenize(sentence.replace('\xa0', ' '))
                   )
                   )

    top_words = []
    for post in wall:
        if 'text' in post:
            top_words.extend(transform(post['text']))
        if 'copy_history' in post:
            for copy in post['copy_history']:
                if 'text' in copy:
                    top_words.extend(transform(copy['text']))

    top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))

    if not top_words:
        return

    # def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    #     return "hsl(%d, 100%%, %d%%)" % (random.randint(0, 360), random.randint(20, 50))

    def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
        return "rgb(0, 0, 0)"

    sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)
    wordcloud = WordCloud(
        max_words=50,
        max_font_size=500,
        background_color='white',
        margin=5,
        width=1000,
        height=1000,
        stopwords=sw,
        prefer_horizontal=0.7,
        font_path='font.ttf'
    ).generate(' '.join(top_words).lower())

    wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()

    img_arr = io.BytesIO()
    wordcloud.save(img_arr, format='PNG')
    img_arr.seek(0)

    return img_arr, wall, top_words


def send_cloud(user_id, message, send=True):
    if user_id in processing:
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Подожди, я составляю твое облако тегов')
        return

    if message.lower() != 'облако':
        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Если ты хочешь получить свое облако тегов за {current_year} '
                                           'год, отправь мне слово "облако" без кавычек 🙃')
        return

    processing.append(user_id)
    print('Generating cloud for', user_id)
    try:
        # if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):
        #     vk_group.messages.send(user_id=user_id,
        #                            random_id=random.randint(0, 99999999),
        #                            message='Чтобы составить облако тегов, '
        #                                    'подпишись на меня https://vk.com/wwcloud 🙄')
        #     time.sleep(1)
        #     vk_group.messages.send(user_id=user_id,
        #                            random_id=random.randint(0, 99999999),
        #                            message='Когда будешь готов, снова отправь кодовое слово "облако" 😊')
        #     processing.remove(user_id)
        #     time.sleep(5)
        #     return

        if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       message='Похоже, у тебя недостаточно записей на стене '
                                               'для составления облака тегов☹️')
            processing.remove(user_id)
            print('Removed (1) cloud from processing for', user_id)
            time.sleep(5)
            return

        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')

        user = vk.users.get(user_ids=user_id)[0]
        user_id = user['id']
        name = user['first_name'] + ' ' + user['last_name']
        clouded = cloud(user_id)
        if not clouded:
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       message='Похоже, у тебя недостаточно записей на стене '
                                               'для составления облака тегов ☹️')
            processing.remove(user_id)
            print('Removed (2) cloud from processing for', user_id)
            time.sleep(5)
            return

        clouded, wall, top_words = clouded
        photo = vk_upload.photo(
            clouded,
            album_id=config.album_id,
            group_id=config.group_id
        )[0]

        if send:
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message='А вот и твое облако тегов! 🌍',
                                   attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))
            vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999),
                                   message='Не забудь поделиться с друзьями 😉')

        post_id = None
        if len(top_words) > 100:
            try:
                post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,
                                       message='Облако тегов для *id{}({})'.format(user_id, name),
                                       attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']
            except Exception as e:
                processing.remove(user_id)
                print(e)
                if send:
                    vk_group.messages.send(user_id=user_id,
                                           random_id=random.randint(0, 99999999),
                                           message='Похоже, я превысил лимит количества постов на сегодня 😭')
                    vk_group.messages.send(user_id=user_id,
                                           random_id=random.randint(0, 99999999),
                                           message='Создай новое облако завтра, и я выложу его на стену группы 😎')
                print('Removed (3) cloud from processing for', user_id)

        if post_id:
            # collection.insert({
            #     'user_id': user_id,
            #     'owner_id': photo['owner_id'],
            #     'id': photo['id'],
            #     'post': post_id,
            #     'timestamp': time.time(),
            #     'length': len(top_words)
            # })
            if send:
                vk_group.messages.send(user_id=user_id,
                                       random_id=random.randint(0, 99999999),
                                       attachment='wall{}_{}'.format(photo['owner_id'], post_id))
        # else:
        #     collection.insert({
        #         'user_id': user_id,
        #         'owner_id': photo['owner_id'],
        #         'id': photo['id'],
        #         'timestamp': time.time(),
        #         'length': len(top_words)
        #     })

        # if send:
        #     vk_group.messages.send(
        #         user_id=user_id,
        #         random_id=random.randint(0, 99999999),
        #         message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',
        #         attachment='wall-136503501_467'
        #     )

        processing.remove(user_id)
        print('Finished cloud for', user_id)
    except Exception as e:
        processing.remove(user_id)
        print('Finished cloud for', user_id, 'with error')
        raise e


def worker(q, old=False):
    while True:
        # Получаем задание из очереди
        item = q.get()
        try:
            item[0](*item[1], **item[2])
        except Exception:
            pass
        # Сообщаем о выполненном задании
        q.task_done()


if __name__ == '__main__':
    q = Queue()
    for i in range(10):
        t = Thread(target=worker, args=(q,))
        t.setDaemon(True)
        t.start()

    print('Initializing longpoll connection...', end=' ')
    longpoll = VkLongPoll(vk_group_session)
    print('Done')

    for event in longpoll.listen():
        if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:
            print(event.user_id, event.text)
            q.put((send_cloud, (event.user_id, event.text), {}))

    q.join()
normal
{ "blob_id": "03ce69924c885e59e40689dc63e50d54b89649f7", "index": 2924, "step-1": "<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\n<mask token>\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\n<mask token>\n", "step-2": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\n<mask token>\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\n<mask token>\nprint('Done')\n<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n 
randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, 
event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n", "step-3": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, 
random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n", "step-4": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = 
tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 
🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n", "step-5": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\n\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\n\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\n\nprocessing = []\n\ncurrent_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))\n\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),\n filter(\n lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,\n tokenizer.tokenize(sentence.replace('\\xa0', ' '))\n )\n )\n\n top_words = []\n for post in 
wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))\n if not top_words:\n return\n\n # def color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n # return \"hsl(%d, 100%%, %d%%)\" % (random.randint(0, 360), random.randint(20, 50))\n\n def color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n return \"rgb(0, 0, 0)\"\n\n sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)\n wordcloud = WordCloud(\n max_words=50,\n max_font_size=500,\n background_color='white',\n margin=5,\n width=1000,\n height=1000,\n stopwords=sw,\n prefer_horizontal=0.7,\n font_path='font.ttf'\n ).generate(' '.join(top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Если ты хочешь получить свое облако тегов за {current_year} '\n 'год, отправь мне слово \"облако\" без кавычек 🙃')\n return\n\n processing.append(user_id)\n\n print('Generating cloud for', user_id)\n try:\n # if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Чтобы составить облако тегов, '\n # 'подпишись на меня https://vk.com/wwcloud 🙄')\n # time.sleep(1)\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Когда будешь готов, снова отправь кодовое слово \"облако\" 😊')\n # processing.remove(user_id)\n # time.sleep(5)\n # return\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов☹️')\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов ☹️')\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(\n clouded,\n album_id=config.album_id,\n group_id=config.group_id\n )[0]\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 
🌍',\n attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')\n\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,\n message='Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, я превысил лимит количества постов на сегодня 😭')\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Создай новое облако завтра, и я выложу его на стену группы 😎')\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'post': post_id,\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n attachment='wall{}_{}'.format(photo['owner_id'], post_id))\n # else:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n\n # if send:\n # vk_group.messages.send(\n # user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',\n # attachment='wall-136503501_467'\n # )\n\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n # Получаем задание из очереди\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n # Сообщаем о выполненном задании\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n\n for event in longpoll.listen():\n if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
# Generated by Django 2.0.7 on 2018-09-27 13:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('education', '0005_auto_20180927_1041'), ] operations = [ migrations.RemoveField( model_name='educationgroup', name='students', ), migrations.AddField( model_name='student', name='education_groups', field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'), ), ]
normal
{ "blob_id": "8ff7ace102b781b35fff0671e2c606bf662e2767", "index": 9851, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n", "step-5": "# Generated by Django 2.0.7 on 2018-09-27 13:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('education', '0005_auto_20180927_1041'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='educationgroup',\n name='students',\n ),\n migrations.AddField(\n model_name='student',\n name='education_groups',\n field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python import rospy import rosnode import csv import datetime import rosbag import sys import os import matplotlib.pyplot as plt import argparse import math from math import hypot import numpy as np from sensor_msgs.msg import LaserScan from std_msgs.msg import String import yaml as yaml start_time = None value_dict = {} combine = False #bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario1/fahrt3.bag' #bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario2/fahrt1.bag' #bag_dir = '/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/reference_bag.bag' ''' "rosservice call /change_material \"{name: \"Gazebo/Grey\", reflectivity: 0.2, transmission:\ \ 0.0, absorption: 0.1, angular_factor: 0.3}\"" ''' def compute_std(mean, liste): temp = [] for item in liste: temp.append((mean - item)**2) nm = sum(temp)/ float(len(temp)) return math.sqrt(nm) def load_file(filePath,file_name): dict_ = {} rospy.loginfo("Loading: %s",filePath+"/"+file_name) try: rospy.loginfo("Loading: %s",file_name) file = open(filePath+file_name,'r') dict_ = yaml.load(file) except yaml.YAMLError as exc: print(exc) rospy.logerr('Failed to load: %s From: %s',file_name,filePath) file.close() return dict_ def get_params(temp): p = {} #temp = temp.split("{")[1] temp = temp.split(",") temp2 = temp[1].split(":")[1] p['reflectivity']=float(temp2.replace(" ", "").replace("\\","")) temp2 = temp[2].split(":")[1] temp2 = temp2.replace("\\","").replace(" ","") p['transmission'] = float(temp2) temp2 = temp[3].split(":")[1] temp2 = temp2.replace("\\","").replace(" ","") p['absorption'] = float(temp2) temp2 = temp[4].split(":")[1] temp2 = temp2.replace("\\","").replace(" ","") temp2 = temp2.replace("}","").replace("\"","") p['angular_factor'] = float(temp2) return p def init(): rospy.init_node("monitoring_bag_topic_extract") def get_bag_data(): path = "/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/" ref = "reference_angels.yaml" ref_dict = load_file(path,ref) angels = ref_dict['angels2'] indexes = ref_dict['index'] ranges = ref_dict['ranges'] for f in os.listdir(path): if rospy.is_shutdown(): break if f.startswith("bag") and f.endswith(".bag"): print "Loading Bag: "+path+f bag = rosbag.Bag(path+f) params = {} scans = [] for topic, msg, t in bag.read_messages(): if topic == "/material_laser_scan": scans.append(msg.ranges) if topic == "/info_vals" and not params: params = get_params(msg.data.split("{")[1]) # compute mean_err, std_dev, data_loss per value scan_info = {} for scan in scans: for idx, val in enumerate(scan): if idx in indexes: #all val should be on the plate i = indexes.index(idx) if idx not in scan_info.keys(): #print str(val) scan_info[idx] = [0,0,0.0,[],0.0,0.0] scan_info[idx][4] = round(ranges[i], 5) scan_info[idx][5] = angels[i] if val <= 0.8: scan_info[idx][1] +=1 scan_info[idx][2] +=val scan_info[idx][3].append(val) else: scan_info[idx][0] +=1 final_d = {} final_d["params"] = params for key in scan_info.keys(): final_d[key] = {} final_d[key]['ref_range'] = scan_info[key][4] final_d[key]['angle'] = scan_info[key][5] if scan_info[key][3]: #if there is at least one element mean = scan_info[key][2] / scan_info[key][1] final_d[key]['mean_range'] = mean std = compute_std(mean, scan_info[key][3]) final_d[key]['stdev'] = std final_d[key]['loss'] = float(scan_info[key][0])/float((scan_info[key][1]+scan_info[key][0])) else: final_d[key]['mean_range'] = 0.0 final_d[key]['stdev'] = 0.0 final_d[key]['loss'] = 1.0 f1 = yaml.dump(final_d, default_flow_style=False) try: 
f = open('/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/yaml/'+f+'.yaml','w') f.write(f1) f.close() except Exception as inst: rospy.loginfo('%s',str(inst)) if __name__ == '__main__': init() get_bag_data()
normal
{ "blob_id": "c00a8bfec46ed829e413257bf97c44add564080d", "index": 8349, "step-1": "#!/usr/bin/env python\nimport rospy\nimport rosnode\nimport csv\nimport datetime\nimport rosbag\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\nimport math\nfrom math import hypot\nimport numpy as np\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import String\nimport yaml as yaml\nstart_time = None\nvalue_dict = {}\ncombine = False\n#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario1/fahrt3.bag'\n#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario2/fahrt1.bag'\n#bag_dir = '/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/reference_bag.bag'\n'''\n\"rosservice call /change_material \\\"{name: \\\"Gazebo/Grey\\\", reflectivity: 0.2, transmission:\\\n \\ 0.0, absorption: 0.1, angular_factor: 0.3}\\\"\"\n'''\n\ndef compute_std(mean, liste):\n temp = []\n for item in liste:\n temp.append((mean - item)**2)\n nm = sum(temp)/ float(len(temp))\n return math.sqrt(nm)\n\ndef load_file(filePath,file_name):\n dict_ = {}\n rospy.loginfo(\"Loading: %s\",filePath+\"/\"+file_name)\n try:\n rospy.loginfo(\"Loading: %s\",file_name)\n file = open(filePath+file_name,'r')\n dict_ = yaml.load(file)\n except yaml.YAMLError as exc:\n print(exc)\n rospy.logerr('Failed to load: %s From: %s',file_name,filePath)\n file.close()\n return dict_\n\ndef get_params(temp):\n p = {}\n #temp = temp.split(\"{\")[1]\n temp = temp.split(\",\")\n temp2 = temp[1].split(\":\")[1]\n p['reflectivity']=float(temp2.replace(\" \", \"\").replace(\"\\\\\",\"\"))\n temp2 = temp[2].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n p['transmission'] = float(temp2)\n temp2 = temp[3].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n p['absorption'] = float(temp2)\n temp2 = temp[4].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n temp2 = temp2.replace(\"}\",\"\").replace(\"\\\"\",\"\")\n p['angular_factor'] = float(temp2)\n return p\n\ndef init():\n rospy.init_node(\"monitoring_bag_topic_extract\")\n\ndef get_bag_data():\n path = \"/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/\"\n ref = \"reference_angels.yaml\"\n ref_dict = load_file(path,ref)\n angels = ref_dict['angels2']\n indexes = ref_dict['index']\n ranges = ref_dict['ranges']\n for f in os.listdir(path):\n if rospy.is_shutdown():\n break\n if f.startswith(\"bag\") and f.endswith(\".bag\"):\n print \"Loading Bag: \"+path+f\n bag = rosbag.Bag(path+f)\n params = {}\n scans = []\n for topic, msg, t in bag.read_messages():\n if topic == \"/material_laser_scan\":\n scans.append(msg.ranges)\n if topic == \"/info_vals\" and not params:\n params = get_params(msg.data.split(\"{\")[1])\n # compute mean_err, std_dev, data_loss per value\n scan_info = {}\n for scan in scans:\n for idx, val in enumerate(scan):\n if idx in indexes:\n #all val should be on the plate\n i = indexes.index(idx)\n if idx not in scan_info.keys():\n #print str(val)\n scan_info[idx] = [0,0,0.0,[],0.0,0.0]\n scan_info[idx][4] = round(ranges[i], 5)\n scan_info[idx][5] = angels[i]\n if val <= 0.8:\n scan_info[idx][1] +=1\n scan_info[idx][2] +=val\n scan_info[idx][3].append(val)\n else:\n scan_info[idx][0] +=1\n final_d = {}\n final_d[\"params\"] = params\n for key in scan_info.keys():\n final_d[key] = {}\n final_d[key]['ref_range'] = scan_info[key][4]\n final_d[key]['angle'] = scan_info[key][5]\n if scan_info[key][3]:\n #if there is at least one element\n mean = 
scan_info[key][2] / scan_info[key][1]\n\n final_d[key]['mean_range'] = mean\n std = compute_std(mean, scan_info[key][3])\n\n final_d[key]['stdev'] = std\n final_d[key]['loss'] = float(scan_info[key][0])/float((scan_info[key][1]+scan_info[key][0]))\n else:\n final_d[key]['mean_range'] = 0.0\n final_d[key]['stdev'] = 0.0\n final_d[key]['loss'] = 1.0\n\n f1 = yaml.dump(final_d, default_flow_style=False)\n try:\n f = open('/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/yaml/'+f+'.yaml','w') \n f.write(f1)\n f.close()\n except Exception as inst:\n rospy.loginfo('%s',str(inst))\n\n\nif __name__ == '__main__':\n init()\n get_bag_data()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np

count = 0  # attempt counter
number = np.random.randint(1, 101)  # the hidden number
print("A number from 1 to 100 has been chosen")

def game_core_v3(number):
    '''Bisect the range 1..100: take the middle of the remaining candidates and keep
    only the half that can still contain the hidden number.
    The function takes the hidden number and returns the number of attempts'''
    count = 1
    allAnsvers = [x for x in range(1, 101)]
    a = int(len(allAnsvers) / 2) - 1
    predict = allAnsvers[a]
    tempList = allAnsvers
    while number != predict:
        count += 1
        if predict > number:
            tempList = tempList[0: a]
            a = int(len(tempList) / 2) - 1
        elif predict < number:
            tempList = tempList[a:]
            a = int(len(tempList) / 2)
        predict = tempList[a]
    return(count)  # exit the loop once the number is guessed

def score_game(game_core):
    '''Run the game 1000 times to find out how quickly it guesses the number'''
    count_ls = []
    np.random.seed(1)  # fix the RANDOM SEED so the experiment is reproducible!
    random_array = np.random.randint(1, 101, size=(1000))
    for number in random_array:
        count_ls.append(game_core(number))
    score = int(np.mean(count_ls))
    print(f"Your algorithm guesses the number in {score} attempts on average")
    return (score)


# run it
score_game(game_core_v3)
normal
{ "blob_id": "66474b8cdca9a4aa48b8dc710d161a3a16495aed", "index": 6438, "step-1": "<mask token>\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\n<mask token>\n", "step-2": "<mask token>\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n", "step-3": "<mask token>\ncount = 0\nnumber = np.random.randint(1, 101)\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n", "step-4": "import numpy as np\ncount = 0\nnumber = np.random.randint(1, 101)\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n 
\"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n", "step-5": "import numpy as np\n\ncount = 0 # счетчик попыток\nnumber = np.random.randint(1, 101) # загадали число\nprint(\"Загадано число от 1 до 100\")\n\ndef game_core_v3(number):\n '''Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток'''\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0: a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return(count) # выход из цикла, если угадали\n\ndef score_game(game_core):\n '''Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число'''\n count_ls = []\n np.random.seed(1) # фиксируем RANDOM SEED, чтобы ваш эксперимент был воспроизводим!\n random_array = np.random.randint(1, 101, size=(1000))\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f\"Ваш алгоритм угадывает число в среднем за {score} попыток\")\n return (score)\n\n\n# запускаем\nscore_game(game_core_v3)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import threading
import serial
import time

# serial link to the BNO compass sensor
bno = serial.Serial('/dev/ttyUSB0', 115200, timeout=.5)
compass_heading = -1.0

def readBNO():
    """Request one heading value over serial and update compass_heading."""
    global compass_heading
    try:
        bno.write(b'g')
        response = bno.readline().decode()
        if response != '':
            compass_heading = float(response.split('\r')[0])
    except (serial.SerialException, ValueError):
        # ignore missed or malformed readings and keep the last heading
        pass

def readContinuous():
    while True:
        readBNO()
        time.sleep(.1)

# poll the sensor in the background; daemon so the program can exit cleanly
bno_thread = threading.Thread(target=readContinuous)
bno_thread.daemon = True
bno_thread.start()

def get_heading():
    return compass_heading

if __name__ == '__main__':
    while True:
        print(get_heading())
        time.sleep(.1)
normal
{ "blob_id": "63a7225abc511b239a69f625b12c1458c75b4090", "index": 8904, "step-1": "<mask token>\n\n\ndef readContinuous():\n while True:\n readBNO()\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef get_heading():\n return compass_heading\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef readBNO():\n global compass_heading\n try:\n bno.write(b'g')\n response = bno.readline().decode()\n if response != '':\n compass_heading = float(response.split('\\r')[0])\n except:\n pass\n\n\ndef readContinuous():\n while True:\n readBNO()\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef get_heading():\n return compass_heading\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef readBNO():\n global compass_heading\n try:\n bno.write(b'g')\n response = bno.readline().decode()\n if response != '':\n compass_heading = float(response.split('\\r')[0])\n except:\n pass\n\n\ndef readContinuous():\n while True:\n readBNO()\n time.sleep(0.1)\n\n\n<mask token>\nbno_thread.start()\n\n\ndef get_heading():\n return compass_heading\n\n\nif __name__ == '__main__':\n while True:\n print(get_heading())\n time.sleep(0.1)\n", "step-4": "<mask token>\nbno = serial.Serial('/dev/ttyUSB0', 115200, timeout=0.5)\ncompass_heading = -1.0\n\n\ndef readBNO():\n global compass_heading\n try:\n bno.write(b'g')\n response = bno.readline().decode()\n if response != '':\n compass_heading = float(response.split('\\r')[0])\n except:\n pass\n\n\ndef readContinuous():\n while True:\n readBNO()\n time.sleep(0.1)\n\n\nbno_thread = threading.Thread(target=readContinuous)\nbno_thread.start()\n\n\ndef get_heading():\n return compass_heading\n\n\nif __name__ == '__main__':\n while True:\n print(get_heading())\n time.sleep(0.1)\n", "step-5": "import threading\nimport serial\nimport time\n\nbno = serial.Serial('/dev/ttyUSB0', 115200, timeout=.5)\ncompass_heading = -1.0\n\ndef readBNO():\n global compass_heading\n try:\n bno.write(b'g')\n response = bno.readline().decode()\n if response != '':\n compass_heading = float(response.split('\\r')[0])\n except:\n pass\n\ndef readContinuous():\n while True:\n readBNO()\n time.sleep(.1)\n\nbno_thread = threading.Thread(target=readContinuous)\nbno_thread.start()\n\ndef get_heading():\n return compass_heading\n\nif __name__ == '__main__':\n while True:\n print(get_heading())\n time.sleep(.1)", "step-ids": [ 2, 3, 4, 5, 7 ] }
[ 2, 3, 4, 5, 7 ]
import numpy as np
import copy
import operator

'''
This script implements a simple decision tree (ID3-style, information gain).
construct_tree: the main function that builds the tree.
Input: data set X of shape n*p, where n is the number of samples, the first p-1
columns are feature values and the last column is the class label;
label: a list of the p-1 attribute names, one per feature column of X.
The tree is represented by nested dicts, e.g.
{attribution1:{0:{attribution2:{}},1:{attribution3:{}}}
'''
def construct_tree(X,label):
    classList = [sample[-1] for sample in X]
    # if all samples belong to the same class, return that class
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # if every attribute has already been used for splitting, return the majority class
    if len(X[0])==1:
        return return_major(classList)
    # an empty subset after a split cannot occur here, because the candidate split
    # values are taken from the samples themselves rather than from a fixed value
    # set (note the difference from the algorithm in the watermelon book textbook)

    # choose the best attribute to split on:
    bestFea = bestdived(X)
    bestFeaName = label[bestFea]
    feaValue = [x[bestFea] for x in X]
    uniqueValue = set(feaValue)
    myTree = {bestFeaName:{}}
    del(label[bestFea])
    for i in uniqueValue:
        myTree[bestFeaName][i]=construct_tree(splitDataSet(X,bestFea,i),label)
    return myTree


# return the class that occurs most often in a list of labels
def return_major(Y):
    # given a list of class labels, return the most common one
    label_count={}
    for i in Y:
        label_count[i] = label_count.get(i,0)+1
    sorted_class = sorted(label_count.items(),key=operator.itemgetter(1),reverse=True)
    return sorted_class[0][0]

def splitDataSet(X,fea,value):
    # return the samples whose attribute `fea` equals `value`, with that column removed
    y = []
    tem = copy.deepcopy(X)
    for i in tem:
        if i[fea] == value:
            del(i[fea])
            y.append(i)
    return y

def bestdived(X):
    # split the data on every feature in turn, compute the entropy of the resulting
    # subsets and pick the feature with the largest information gain
    baseEnt = calcEnt(X)
    tem0 = 0  # best information gain so far
    for i in range(len(X[0])-1):
        # loop over the features
        feaValue = [x[i] for x in X]
        uniqueValue = set(feaValue)
        tem1 = 0  # weighted entropy of the subsets produced by this feature
        for j in uniqueValue:
            subDataset = splitDataSet(X,i,j)
            prob = len(subDataset)/len(X)
            tem1 = tem1 + prob*calcEnt(subDataset)
        infoGain = baseEnt - tem1
        if infoGain > tem0:
            tem0 = infoGain
            bestFea = i
    return bestFea

def calcEnt(X):
    # entropy of data set X with respect to the class labels (natural log)
    labelCount = {}
    for i in X:
        i = i[-1]
        labelCount[i] = labelCount.get(i,0)+1
    tem = np.array(list(labelCount.values()))
    tem = tem/len(X)
    return np.sum(-np.log(tem)*tem)
normal
{ "blob_id": "ff66b33a133b627ba2329434d6c1649c94b6ec78", "index": 8188, "step-1": "<mask token>\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\n<mask token>\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n", "step-2": "<mask token>\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n", "step-3": "<mask token>\n\n\ndef construct_tree(X, label):\n classList = [sample[-1] for sample in X]\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n if len(X[0]) == 1:\n return return_major(classList)\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName: {}}\n del label[bestFea]\n for i in uniqueValue:\n myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),\n label)\n return myTree\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n", "step-4": "import numpy as np\nimport copy\n<mask token>\n\n\ndef construct_tree(X, label):\n classList = [sample[-1] for sample in X]\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n if len(X[0]) == 1:\n return return_major(classList)\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] 
for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName: {}}\n del label[bestFea]\n for i in uniqueValue:\n myTree[bestFeaName][i] = construct_tree(splitDataSet(X, bestFea, i),\n label)\n return myTree\n\n\ndef return_major(Y):\n label_count = {}\n for i in Y:\n label_count[i] = label_count.get(i, 0) + 1\n sorted_class = sorted(label_count.items(), key=operator.itemgetter(1),\n reverse=True)\n return sorted_class[0][0]\n\n\ndef splitDataSet(X, fea, value):\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del i[fea]\n y.append(i)\n return y\n\n\ndef bestdived(X):\n baseEnt = calcEnt(X)\n tem0 = 0\n for i in range(len(X[0]) - 1):\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0\n for j in uniqueValue:\n subDataset = splitDataSet(X, i, j)\n prob = len(subDataset) / len(X)\n tem1 = tem1 + prob * calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\n\ndef calcEnt(X):\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i, 0) + 1\n tem = np.array(list(labelCount.values()))\n tem = tem / len(X)\n return np.sum(-np.log(tem) * tem)\n", "step-5": "import numpy as np\nimport copy\n'''\n本脚本主要用来实现决策树的相关内容。\nconstrcut_tree:该函数是构建决策树的主要函数\n其输入:数据集X:n*p n:样本数,p-1维特征,p为样本类别,\n以及属性信息label:属性名称,p-1一维数组,label表示的是此时X每一列对应的属性名称\n决策结构用字典来表示,例如{attribution1:{0:{attribution2:{}},1:{attribution3:{}}}\n'''\n\ndef construct_tree(X,label):\n \n classList = [sample[-1] for sample in X]\n #如果此时所有的样本的类别相同,返回该类别。\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n #如果此时对应属性已经划分完毕\n if len(X[0])==1:\n return return_major(classList)\n #如果此时划分之后的子集为空,但是显然这是不可能的,对于这种情况来说,\n #因为我们后面的编程过程中,我的属性划分的个数是根据,此时样本的属性数\n #得到的,而不是一开始默认的,注意于西瓜书上算法的区别\n\n #选择最优划分属性:\n bestFea = bestdived(X)\n bestFeaName = label[bestFea]\n feaValue = [x[bestFea] for x in X]\n uniqueValue = set(feaValue)\n myTree = {bestFeaName:{}}\n del(label[bestFea])\n for i in uniqueValue:\n myTree[bestFeaName][i]=construct_tree(splitDataSet(X,bestFea,i),label)\n return myTree\n\n\n\n\n#统计一组数据中,出现次数最多的时候用以下代码\ndef return_major(Y):\n #给定一组类别,返回这组数据中,最大的类别\n label_count={}\n for i in Y:\n label_count[i] = label_count.get(i,0)+1\n sorted_class = sorted(label_count.items(),key=operator.itemgetter(1),reverse=True)\n return sorted_class[0][0]\n\ndef splitDataSet(X,fea,value):\n #根据属性的某个值得到相应的数据集\n y = []\n tem = copy.deepcopy(X)\n for i in tem:\n if i[fea] == value:\n del(i[fea])\n y.append(i)\n return y\n\ndef bestdived(X):\n #对任何一个特征进行划分,计算得到的数据集的熵。然后计算\n #这个特征对应的信息增益\n baseEnt = calcEnt(X)\n tem0 = 0#记录最大的信息增益\n for i in range(len(X[0])-1):\n #fea 循环\n feaValue = [x[i] for x in X]\n uniqueValue = set(feaValue)\n tem1 = 0#记录该特征划分的子集熵的总和\n for j in uniqueValue:\n subDataset = splitDataSet(X,i,j)\n prob = len(subDataset)/len(X)\n tem1 = tem1 + prob*calcEnt(subDataset)\n infoGain = baseEnt - tem1\n if infoGain > tem0:\n tem0 = infoGain\n bestFea = i\n return bestFea\n\ndef calcEnt(X):\n #计算数据即X的熵,此时的熵是当对于类别信息来的。\n labelCount = {}\n for i in X:\n i = i[-1]\n labelCount[i] = labelCount.get(i,0)+1;\n tem = np.array(list(labelCount.values()))\n tem = tem/len(X)\n return np.sum(-np.log(tem)*tem)\n\n\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from django.db import models # Create your models here. class Tutorial(models.Model): web_title = models.CharField(max_length=200) web_content = models.TextField() web_published = models.DateTimeField("date published") def __str__(self): return self.web_title
normal
{ "blob_id": "32499688db51f701173ec0ea212c483bf902c109", "index": 3048, "step-1": "<mask token>\n\n\nclass Tutorial(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Tutorial(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.web_title\n", "step-3": "<mask token>\n\n\nclass Tutorial(models.Model):\n web_title = models.CharField(max_length=200)\n web_content = models.TextField()\n web_published = models.DateTimeField('date published')\n\n def __str__(self):\n return self.web_title\n", "step-4": "from django.db import models\n\n\nclass Tutorial(models.Model):\n web_title = models.CharField(max_length=200)\n web_content = models.TextField()\n web_published = models.DateTimeField('date published')\n\n def __str__(self):\n return self.web_title\n", "step-5": "from django.db import models\n\n# Create your models here.\nclass Tutorial(models.Model):\n\tweb_title = models.CharField(max_length=200)\n\tweb_content = models.TextField()\n\tweb_published = models.DateTimeField(\"date published\")\n\n\tdef __str__(self):\n\t\treturn self.web_title\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
class Node:

    def __init__(self, data):
        self.data = data
        self.next = None
        self.prev = None


class dequeue:

    def __init__(self):
        self.front = None
        self.last = None
        self.count = 0

    def add_front(self, data):
        new_nodef = Node(data)
        if(self.front == None):
            self.front = self.last = new_nodef
            self.count += 1
        else:
            new_nodef.next = self.front
            self.front.prev = new_nodef
            self.front = new_nodef
            self.count += 1

    def add_last(self, data):
        new_nodeb = Node(data)
        if(self.last == None):
            self.last = self.front = new_nodeb
            self.count += 1
        else:
            new_nodeb.prev = self.last
            self.last.next = new_nodeb
            self.last = new_nodeb
            self.count += 1

    def print_list(self):
        if(self.front == None):
            return
        temp = self.front
        while(temp != None):
            print(temp.data)
            temp = temp.next

    def remove_front(self):
        if(self.front == None):
            return
        else:
            self.front = self.front.next
            if(self.front == None):
                self.last = None
                return
            self.count -= 1
            self.front.prev = None

    def remove_last(self):
        if(self.last == None):
            return
        else:
            self.last = self.last.prev
            if(self.last == None):
                self.front = None
                return
            self.count -= 1
            self.last.next = None

    def is_empty(self):
        if(self.count == 0):
            return True
        else:
            return False

    def size(self):
        print(self.count)

    def entry(self):
        pal_to_check = str(input("Enter the string to check whether palindrome or not :"))
        pal_list = [str(i) for i in pal_to_check]
        print(pal_list)
        pal_check_con = llist.pal_check(pal_list)
        print("Is palindrome :", pal_check_con)

    def pal_check(self, pal_lis):
        for i in pal_lis:
            llist.add_front(i)
        while(self.count != 0):
            if(self.front.data == self.last.data):
                llist.remove_front()
                if(self.count > 1):
                    llist.remove_last()
            else:
                return False
            if(self.count == 1):
                break
        return True


# Driver function
if __name__ == "__main__":
    llist = dequeue()
    llist.entry()
normal
{ "blob_id": "2f6e0b6a7e14ac9c5a38db6fd2b1cf23cff7144e", "index": 172, "step-1": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n <mask token>\n\n def size(self):\n print(self.count)\n <mask token>\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n", "step-3": "class Node:\n <mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n 
self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n", "step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n", "step-5": "class Node:\n\n def __init__(self,data):\n self.data = data\n self.next = None\n self.prev = None \n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if(self.front == None):\n self.front = self.last = new_nodef\n self.count +=1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = 
new_nodef\n self.count +=1\n\n \n def add_last(self,data):\n new_nodeb = Node(data)\n if(self.last == None):\n self.last = self.front = new_nodeb\n self.count +=1\n\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb \n self.count +=1\n \n def print_list(self):\n if(self.front == None):\n return\n temp = self.front\n while(temp != None):\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if(self.front == None):\n return\n else:\n self.front = self.front.next\n if(self.front == None):\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if(self.last == None):\n return\n else:\n self.last = self.last.prev\n if(self.last == None):\n self.front = None\n return\n self.count -= 1 \n self.last.next = None\n \n def is_empty(self):\n if(self.count == 0):\n return True\n else: \n return False\n def size(self):\n print(self.count)\n \n\n def entry(self):\n \n pal_to_check = str(input(\"Enter the string to check whether palindrome or not :\"))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print(\"Is palindrome :\",pal_check_con)\n \n def pal_check(self, pal_lis): \n for i in pal_lis:\n llist.add_front(i)\n while(self.count != 0):\n if(self.front.data == self.last.data):\n llist.remove_front()\n if(self.count > 1):\n llist.remove_last() \n else:\n return False\n if(self.count == 1):\n break \n return True\n\n\n#Driver function\nif __name__==\"__main__\":\n \n llist = dequeue()\n llist.entry()\n\n", "step-ids": [ 8, 10, 12, 13, 15 ] }
[ 8, 10, 12, 13, 15 ]
# coding=utf-8
import requests
# urlencode lives in urllib.parse on Python 3 (on Python 2 it is urllib.urlencode)
from urllib.parse import urlencode


class HtmlDownload(object):
    """docstring for HtmlDownload"""

    @staticmethod
    def html_download(city, keyWords, pages):
        # root URL
        paras = {
            'jl': city,
            'kw': keyWords,
            'pages': pages,
            'isadv': 0
        }
        url = "http://sou.zhaopin.com/jobs/searchresult.ashx?" + urlencode(paras)
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        else:
            return None
normal
{ "blob_id": "e33aca56e4c9f82779278e836308c2e22d3356e2", "index": 3770, "step-1": "<mask token>\n", "step-2": "class HtmlDownload(object):\n <mask token>\n <mask token>\n", "step-3": "class HtmlDownload(object):\n <mask token>\n\n def html_download(city, keyWords, pages):\n paras = {'jl': city, 'kw': keyWords, 'pages': pages, 'isadv': 0}\n url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?' + urlencode(\n paras)\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n else:\n return None\n", "step-4": "class HtmlDownload(object):\n \"\"\"docstring for HtmlDownload\"\"\"\n\n def html_download(city, keyWords, pages):\n paras = {'jl': city, 'kw': keyWords, 'pages': pages, 'isadv': 0}\n url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?' + urlencode(\n paras)\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n else:\n return None\n", "step-5": "# coding=utf-8\nclass HtmlDownload(object):\n\t\"\"\"docstring for HtmlDownload\"\"\"\n\n\tdef html_download(city, keyWords, pages):\n # root URL\n\t paras = {\n\t 'jl': city,\n\t 'kw': keyWords,\n\t 'pages': pages,\n\t 'isadv': 0\n\t }\n\t url = \"http://sou.zhaopin.com/jobs/searchresult.ashx?\" + urlencode(paras)\n\t response = requests.get(url)\n\t if response.status_code == 200:\n\t return response.text\n\t else:\n\t return None", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from __future__ import unicode_literals

from django.db import models


# Create your models here.
class Group(models.Model):
    name = models.CharField(max_length=200, db_index=True)
    loan_eligibility = models.CharField(max_length=200, db_index=True)
    account_number = models.CharField(max_length=200, db_index=True)
    incharge = models.CharField(max_length=200, db_index=True)
    incharge2 = models.CharField(max_length=200, db_index=True)


class Member(models.Model):
    name = models.CharField(max_length=200, db_index=True)
    age = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
    address1 = models.CharField(max_length=200)
    address2 = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
normal
{ "blob_id": "0c8b58acf33bdfa95984d29a75ae01e49d0da149", "index": 9202, "step-1": "<mask token>\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n", "step-2": "<mask token>\n\n\nclass Group(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n", "step-3": "<mask token>\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import models\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n", "step-5": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.contrib import admin
from .models import Game, Scrap

admin.site.register(Game)
admin.site.register(Scrap)
normal
{ "blob_id": "7e328992392a4ff2b0e23920a8907e38f63fcff0", "index": 7168, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.site.register(Game)\nadmin.site.register(Scrap)\n", "step-3": "from django.contrib import admin\nfrom .models import Game, Scrap\nadmin.site.register(Game)\nadmin.site.register(Scrap)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django import forms
from .models import Profile


class ImageForm(forms.ModelForm):
    userimage = forms.ImageField(required=False, error_messages={'invalid': ("Image file only")}, widget=forms.FileInput)

    class Meta:
        model = Profile
        fields = ['userimage', ]
normal
{ "blob_id": "9081d0f75ac53ab8d0bafb39cd46a2fec8a5135f", "index": 3813, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ImageForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-3": "<mask token>\n\n\nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':\n 'Image file only'}, widget=forms.FileInput)\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-4": "from django import forms\nfrom .models import Profile\n\n\nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':\n 'Image file only'}, widget=forms.FileInput)\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-5": "from django import forms\nfrom .models import Profile\n\n\n\n\n \nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':(\"Image file only\")}, widget=forms.FileInput)\n class Meta:\n model = Profile\n fields = ['userimage',]\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from OTXv2 import OTXv2
from pandas.io.json import json_normalize
from datetime import datetime, timedelta
import getopt
import sys
from sendemail import sendemail
from main import otx
import csv
import pandas as pd
from pandas import read_csv
import os.path


def tools():

    search = str(input('Please enter search: '))
    search = search.strip()
    pulsesJSON = otx.search_pulses(search, 40)  # Retrieves list (in json format) of top 40 pulses with tag "crypto"

    # Loops through each individual pulse retrieved from OTX, and prints name & requested fields.
    for aPulse in pulsesJSON["results"]:

        name = aPulse.get('name')
        description = aPulse.get('description')
        modified = aPulse.get('modified')
        pulseid = aPulse.get('id')

        '''
        If needed, add more categories to pull for each pulse here.
        '''

        # list with data to add to csv file
        raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description': description, 'Modified': modified}]

        # the path to the file
        filename = 'shenzi_pulses.csv'

        # use to check for the file before deciding whether to write headers
        file_exists = os.path.isfile(filename)

        # opens the file to append ID, Name, Modified, Description
        with open(filename, "a") as csv_file:
            csv_columns_headers = ['Pulse ID', 'Name', 'Description', 'Modified']
            writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=csv_columns_headers)
            # if file does not exist write the headers
            if not file_exists:
                writer.writeheader()
            # write the information from raw_data by rows
            for data in raw_data:
                writer.writerow(data)

    # simple option to email or quit
    option = input('1: To Email 2: To quit : ')
    option = int(option)

    if option == 1:
        # uses the email function to send email
        sendemail()
        # delete file once email has sent
        os.remove('pulseIdsList.csv')
    elif option == 2:
        # option to quit
        raise SystemExit()
normal
{ "blob_id": "659f45d2c6c7138f26b4a8d15d1710ae60450b08", "index": 6278, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef tools():\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40)\n for aPulse in pulsesJSON['results']:\n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified')\n pulseid = aPulse.get('id')\n \"\"\"\n If needed, add more categories to pull for each pulse here.\n \"\"\"\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':\n description, 'Modified': modified}]\n filename = 'shenzi_pulses.csv'\n with open(filename, 'w') as csv_file:\n csv_columns_headers = ['Pulse ID', 'Name', 'Description',\n 'Modified']\n writer = csv.DictWriter(csv_file, delimiter=',', lineterminator\n ='\\n', fieldnames=csv_columns_headers)\n if not file_exists:\n writer.writeheader()\n else:\n for data in raw_data:\n writer.writerow(data)\n option = input('1: To Email 2: To quit : ')\n option = int(option)\n if option == 1:\n sendemail()\n os.remove('pulseIdsList.csv')\n elif option == 2:\n SystemExit()\n", "step-3": "from OTXv2 import OTXv2\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nimport getopt\nimport sys\nfrom sendemail import sendemail\nfrom main import otx\nimport csv\nimport pandas as pd\nfrom pandas import read_csv\nimport os.path\n\n\ndef tools():\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40)\n for aPulse in pulsesJSON['results']:\n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified')\n pulseid = aPulse.get('id')\n \"\"\"\n If needed, add more categories to pull for each pulse here.\n \"\"\"\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':\n description, 'Modified': modified}]\n filename = 'shenzi_pulses.csv'\n with open(filename, 'w') as csv_file:\n csv_columns_headers = ['Pulse ID', 'Name', 'Description',\n 'Modified']\n writer = csv.DictWriter(csv_file, delimiter=',', lineterminator\n ='\\n', fieldnames=csv_columns_headers)\n if not file_exists:\n writer.writeheader()\n else:\n for data in raw_data:\n writer.writerow(data)\n option = input('1: To Email 2: To quit : ')\n option = int(option)\n if option == 1:\n sendemail()\n os.remove('pulseIdsList.csv')\n elif option == 2:\n SystemExit()\n", "step-4": "from OTXv2 import OTXv2\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nimport getopt\nimport sys\nfrom sendemail import sendemail\nfrom main import otx\nimport csv\nimport pandas as pd\nfrom pandas import read_csv\nimport os.path\n\ndef tools():\n\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40) # Retrieves list (in json format) of top 40 pulses with tag \"crypto\"\n\n # Loops through each individual pulse retrieved from OTX, and prints name & requested fields.\n\n for aPulse in pulsesJSON[\"results\"]:\n \n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified') \n pulseid = aPulse.get('id')\n\n '''\n If needed, add more categories to pull for each pulse here.\n '''\n \n #list with data to add to csv file\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description': description, 'Modified': modified}]\n\n #the path to the file\n filename = 'shenzi_pulses.csv'\n \n #use to check for the file\n #file_exists = os.path.isfile(filename)\n \n #opens the 
file to append ID, Name, Modified, Description\n with open(filename, \"w\") as csv_file:\n csv_columns_headers = ['Pulse ID','Name','Description','Modified']\n writer = csv.DictWriter(csv_file, delimiter=',',lineterminator='\\n', fieldnames=csv_columns_headers)\n #if file does not exist write the headers\n if not file_exists:\n writer.writeheader()\n #write the information from raw_data by rows\n else:\n for data in raw_data:\n writer.writerow(data)\n\n #simple option to email or quit \n option = input('1: To Email 2: To quit : ')\n \n option = int(option)\n \n if option == 1:\n #uses the email function to send email\n sendemail()\n #delete file once email has sent\n os.remove('pulseIdsList.csv')\n elif option == 2:\n #option to quit\n SystemExit() \n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Generated by Django 2.2.2 on 2019-07-30 01:25

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('usuarios', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usuario',
            name='inicio',
            field=models.DateField(verbose_name='Data Inicio'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='saida',
            field=models.DateField(null=True, verbose_name='Data de Saida'),
        ),
    ]
normal
{ "blob_id": "5e4a334b373d912ba37b18f95e4866450bda5570", "index": 3938, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial')]\n operations = [migrations.AlterField(model_name='usuario', name='inicio',\n field=models.DateField(verbose_name='Data Inicio')), migrations.\n AlterField(model_name='usuario', name='saida', field=models.\n DateField(null=True, verbose_name='Data de Saida'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial')]\n operations = [migrations.AlterField(model_name='usuario', name='inicio',\n field=models.DateField(verbose_name='Data Inicio')), migrations.\n AlterField(model_name='usuario', name='saida', field=models.\n DateField(null=True, verbose_name='Data de Saida'))]\n", "step-5": "# Generated by Django 2.2.2 on 2019-07-30 01:25\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuarios', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='usuario',\n name='inicio',\n field=models.DateField(verbose_name='Data Inicio'),\n ),\n migrations.AlterField(\n model_name='usuario',\n name='saida',\n field=models.DateField(null=True, verbose_name='Data de Saida'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# import libraries
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine


def load_data(messages_filepath, categories_filepath):
    """
    This function loads the message and categories files,
    merges them and returns the new dataframe for the project
    """
    # Read messages and categories data
    messaging = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)

    # Merge the two dataframes
    dataframe = messaging.merge(categories, how='inner', on='id')
    return dataframe


def clean_data(dataframe):
    """
    Cleaning the merged dataframe to make it ready to analyze
    """
    # split categories into separate columns
    categories = dataframe.categories.str.split(';', expand=True)

    # select the first row of the categories dataframe and derive column names
    row = categories.iloc[0]
    category_colnames = row.apply(lambda x: x[:-2])
    categories.columns = category_colnames

    # convert categories values to numeric instead of strings
    for column in categories:
        categories[column] = categories[column].str[-1]
        categories[column] = categories[column].astype(int)

    # replace categories column in dataframe
    dataframe.drop(columns=['categories'], inplace=True)
    # concatenate the original dataframe with the new `categories` dataframe
    dataframe = dataframe.join(categories)

    # drop duplicates
    dataframe.drop_duplicates(inplace=True)

    return dataframe


def save_data(dataframe, database_filename):
    """
    Take the input dataframe and save it into sqlite database
    """
    # Creating sqlite engine and saving the dataframe with the name messaging
    engine_process = create_engine('sqlite:///' + database_filename)
    dataframe.to_sql('messaging', engine_process, index=False, if_exists='replace')


def main():
    if len(sys.argv) == 4:

        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]

        print('Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
              .format(messages_filepath, categories_filepath))
        dataframe = load_data(messages_filepath, categories_filepath)

        print('Cleaning data...')
        dataframe = clean_data(dataframe)

        print('Saving data...\n    DATABASE: {}'.format(database_filepath))
        save_data(dataframe, database_filepath)

        print('Cleaned data saved to database!')

    else:
        print('Please provide the filepaths of the messages and categories '
              'datasets as the first and second argument respectively, as '
              'well as the filepath of the database to save the cleaned data '
              'to as the third argument. \n\nExample: python process_data.py '
              'disaster_messages.csv disaster_categories.csv '
              'DisasterResponse.db')


if __name__ == '__main__':
    main()
normal
{ "blob_id": "4642537f8af1f060f5ee43cc9e98bd07be6a558c", "index": 124, "step-1": "# import libraries\nimport sys\nimport pandas as pd\nimport numpy as n\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n This function loads the message and categories files and\n merge them and return the new dataframe for the project\n \"\"\"\n # Read messages and categories data\n messaging = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n\n # Merge the two dataframes\n dataframe = messaging.merge(categories, how='inner', on= 'id')\n return dataframe\n \n\n\ndef clean_data(dataframe):\n \"\"\"\n Cleaning the merged dataframe to make it ready to analyze\n \"\"\"\n # split categories into seperate\n categories = dataframe.categories.str.split(';', expand=True)\n \n # select the first row&col of the categories dataframe\n row&col = categories.iloc[0]\n cate_col = row&col.apply(lambda x: x[:-2])\n cate.columns = cate_colnames\n \n #convert categories values to numeric instead of strings\n for column in categories:\n categories[column] = categories[column].str[-1]\n categories[column] = categories[column].astype(int)\n \n # replace categories column in dataframe \n dataframe.drop(columns = ['categories'], inplace=True)\n # concatenate the original dataframe with the new `categories` dataframe\n dataframe = dataframe.join(categories)\n \n #drop duplicates\n dataframe.drop_duplicates(inplace=True)\n \n return dataframe\n\ndef save_data(dataframe, database_filename):\n \"\"\"\n Take the input dataframe and save it into sqlite database\n \"\"\"\n # Creating sqlite engine and save the dataframe with the name message\n engine_process = create_engine('sqlite:///Messages.db')\n dataframe.to_sql('messaging', engine_process, index=False,if_exists='replace') \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n dataframe = load_data(messages_filepath, categories_filepath)\n print('Cleaning data...')\n dataframe = clean_data(dataframe)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(dataframe, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from selenium import webdriver
import time
import xlwt
from JD_PhoneNo import get_phone_no

book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Sheet 1")
browser = webdriver.Firefox()
browser.get("https://www.zomato.com/bhopal/dinner")
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []


def traverse(a, b):
    temp = []
    for i in range(a, b, 1):
        a = str(i)
        button = browser.find_element_by_link_text(a)
        button.click()
        name_list = browser.find_elements_by_class_name("result-title.hover_feedback.zred.bold.ln24.fontsize0")
        add_list = browser.find_elements_by_class_name("col-m-16.search-result-address.grey-text.nowrap.ln22")
        phone_list = browser.find_elements_by_class_name("item.res-snippet-ph-info")
        for i in range(1, 18):
            if(i == 4 or i == 10):
                continue
            else:
                try:
                    z_costoftwo.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[" + str(i) + "]/div[1]/div/article/div[3]/div[2]/span[2]").text)
                except Exception as e:
                    z_costoftwo.append("NILL")
                try:
                    z_hours.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[" + str(i) + "]/div[1]/div/article/div[3]/div[3]/div[1]").text)
                except Exception as e1:
                    z_hours.append("NILL")
                try:
                    z_votes.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[" + str(i) + "]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span").text)
                except Exception as e1:
                    z_votes.append("NEW")
                try:
                    z_rating_list.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[" + str(i) + "]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]").text)
                except Exception as e:
                    z_rating_list.append("NILL")
        for names in name_list:
            z_hotel_list.append(names.text)
            temp.append(names.text)
        for addname in add_list:
            z_address_list.append(addname.text)
        for phonename in phone_list:
            z_phone_list.append(phonename.get_attribute("data-phone-no-str"))
        if(int(a) < 6):
            clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]")
            clk.click()
        else:
            clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]")
            clk.click()


traverse(1, 6)
traverse(6, 11)
traverse(11, 16)
traverse(16, 21)
traverse(21, 26)
# traverse(26,31)
# traverse(31,36)
# traverse(36,41)
# traverse(41,46)
# traverse(46,51)
# traverse(51,56)
# for i in range(1,5,10):
#     traverse(i,i+5)
#     traverse(i+5,i+10)
for i in range(0, len(z_hotel_list), 1):
    sheet1.write(i, 0, z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
    sheet1.write(i, 1, z_phone_list[i])
for i in range(0, len(z_address_list), 1):
    sheet1.write(i, 2, z_address_list[i])
for i in range(0, len(z_rating_list)):
    sheet1.write(i, 3, z_rating_list[i])
for i in range(0, len(z_costoftwo)):
    sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
    sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
    sheet1.write(i, 6, z_votes[i])

print("Writing to excel Finished")
book.save("ZomatoBhopal(data).xls")
normal
{ "blob_id": "96425986305171a9d23231f60b35dcbcbbd12d2d", "index": 7995, "step-1": "<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\n<mask token>\n", "step-2": "<mask token>\nbrowser.get('https://www.zomato.com/bhopal/dinner')\n<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n 
z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n", "step-3": "<mask token>\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n 
z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n", "step-4": "from selenium import webdriver\nimport time\nimport xlwt\nfrom JD_PhoneNo import get_phone_no\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n 
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n", "step-5": "from selenium import webdriver\r\nimport time\r\nimport xlwt\r\nfrom JD_PhoneNo import get_phone_no\r\nbook = xlwt.Workbook(encoding=\"utf-8\")\r\nsheet1=book.add_sheet(\"Sheet 1\")\r\nbrowser = webdriver.Firefox()\r\nbrowser.get(\"https://www.zomato.com/bhopal/dinner\")\r\nz_hotel_list = []\r\nz_address_list = []\r\nz_phone_list = []\r\nz_rating_list = []\r\nz_costoftwo = []\r\nz_votes = []\r\nz_hours = []\r\n\r\ndef traverse(a,b):\r\n temp = []\r\n for i in range(a,b,1):\r\n a = str(i)\r\n button = browser.find_element_by_link_text(a)\r\n button.click()\r\n name_list = browser.find_elements_by_class_name(\"result-title.hover_feedback.zred.bold.ln24.fontsize0\")\r\n add_list = browser.find_elements_by_class_name(\"col-m-16.search-result-address.grey-text.nowrap.ln22\")\r\n phone_list = browser.find_elements_by_class_name(\"item.res-snippet-ph-info\")\r\n for i in range(1,18):\r\n if(i==4 or i==10 ):\r\n continue\r\n else:\r\n try:\r\n z_costoftwo.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[2]/span[2]\").text)\r\n except Exception as e:\r\n z_costoftwo.append(\"NILL\")\r\n try:\r\n z_hours.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[3]/div[1]\").text)\r\n except Exception as e1:\r\n z_hours.append(\"NILL\")\r\n try:\r\n z_votes.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span\").text)\r\n except Exception as e1:\r\n z_votes.append(\"NEW\")\r\n try:\r\n z_rating_list.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]\").text)\r\n except Exception as e:\r\n z_rating_list.append(\"NILL\")\r\n for names in 
name_list:\r\n z_hotel_list.append(names.text)\r\n temp.append(names.text)\r\n for addname in add_list:\r\n z_address_list.append(addname.text)\r\n for phonename in phone_list:\r\n z_phone_list.append(phonename.get_attribute(\"data-phone-no-str\"))\r\n if(int(a)<6):\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]\")\r\n clk.click()\r\n else:\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]\")\r\n clk.click()\r\ntraverse(1,6)\r\ntraverse(6,11)\r\ntraverse(11,16)\r\ntraverse(16,21)\r\ntraverse(21,26)\r\n# traverse(26,31)\r\n# traverse(31,36)\r\n# traverse(36,41)\r\n# traverse(41,46)\r\n# traverse(46,51)\r\n# traverse(51,56)\r\n# for i in range(1,5,10):\r\n# traverse(i,i+5)\r\n# traverse(i+5,i+10)\r\nfor i in range(0,len(z_hotel_list),1):\r\n sheet1.write(i,0,z_hotel_list[i])\r\nfor i in range(0, len(z_phone_list), 1):\r\n sheet1.write(i,1,z_phone_list[i])\r\nfor i in range(0, len(z_address_list), 1):\r\n sheet1.write(i, 2, z_address_list[i])\r\nfor i in range(0,len(z_rating_list)):\r\n sheet1.write(i,3,z_rating_list[i])\r\nfor i in range(0, len(z_costoftwo)):\r\n sheet1.write(i, 4, z_costoftwo[i])\r\nfor i in range(0, len(z_hours)):\r\n sheet1.write(i, 5, z_hours[i])\r\nfor i in range(0, len(z_votes)):\r\n sheet1.write(i, 6, z_votes[i])\r\n\r\nprint(\"Writing to excel Finished\")\r\nbook.save(\"ZomatoBhopal(data).xls\")\r\n\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]