code              string   (lengths 13 to 1.2M)
order_type        string   (1 class value)
original_example  dict
step_ids          list     (lengths 1 to 5)
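Each record pairs a full source file (code) with a progressively unmasked version of it (original_example, holding blob_id, index, step-1 through step-5 and step-ids) plus a flat step_ids list. A minimal sketch of iterating such records with the Hugging Face datasets library follows; the dataset id "your/dataset-name" is a placeholder, not the real one.

# Sketch only: assumes the rows expose exactly the columns listed above.
from datasets import load_dataset

rows = load_dataset("your/dataset-name", split="train")

for row in rows:
    code = row["code"]                 # full source string (13 chars to 1.2M chars)
    order = row["order_type"]          # single class, e.g. "normal"
    example = row["original_example"]  # dict with blob_id, index, step-1..step-5, step-ids
    step_ids = row["step_ids"]         # list of 1 to 5 integers
    print(example["blob_id"], order, len(code), step_ids)
    break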
from django.db import models

# Create your models here.
class UserInfo(models.Model):
    uname = models.CharField('用户名', max_length=50, null=False)
    upassword = models.CharField('密码', max_length=200, null=False)
    email = models.CharField('邮箱', max_length=50, null=True)
    phone = models.CharField('手机号', max_length=20, null=False)
    time = models.DateTimeField('注册时间', auto_now=True)
    isban = models.BooleanField('禁用', default=False)
    isdelete = models.BooleanField('删除', default=False)

    def __str__(self):
        return self.uname

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name


class Address(models.Model):
    aname = models.CharField('收货人', max_length=50, null=False)
    ads = models.CharField('地址', max_length=300, null=False)
    phone = models.CharField('电话', max_length=20, null=False)
    user = models.ForeignKey(UserInfo)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
normal
{ "blob_id": "dbec74ecf488ca98f3f441e252f79bc2bc0959c1", "index": 4068, "step-1": "<mask token>\n\n\nclass UserInfo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n", "step-2": "<mask token>\n\n\nclass UserInfo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n", "step-3": "<mask token>\n\n\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n", "step-4": "from django.db import models\n\n\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n", "step-5": "from django.db import models\n\n# Create your models here.\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = 
models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
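One detail about the models in this record: models.ForeignKey(UserInfo) only works on Django versions before 2.0; from 2.0 onward an explicit on_delete argument is required. A minimal sketch of the updated field follows, where the CASCADE policy is an assumed choice rather than part of the original code.

from django.db import models


class Address(models.Model):
    # Django >= 2.0 requires on_delete; CASCADE is assumed here, meaning an
    # address row is deleted together with the UserInfo row it points to.
    user = models.ForeignKey('UserInfo', on_delete=models.CASCADE)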
import unittest

from theoktany.serializers import serialize


class SerializerTest(unittest.TestCase):

    class TestObject(object):
        def __init__(self, **kwargs):
            for name, value in kwargs.items():
                self.__setattr__(name, value)

    def test_serialize(self):
        object_dict = {'firstName': 'Test', 'lastName': 'Test last'}
        json_str1 = '"firstName": "Test"'
        json_str2 = '"lastName": "Test last"'

        serialized_str = serialize(object_dict)

        self.assertIn(json_str1, serialized_str)
        self.assertIn(json_str2, serialized_str)

    def test_serialize_string(self):
        """Ensure that quotes are properly escaped"""
        string = 'This is a "string" with \'quotes.\''
        json_string = '"{}"'.format(string.replace('"', '\\"'))
        self.assertEqual(serialize(string), json_string)

    def test_serialize_none(self):
        """Ensure that None gets serialized to 'null'"""
        self.assertEqual(serialize(None), 'null')

    def test_serialize_object(self):
        """Ensure that the serializer throws an error for an unserializable object"""
        test_obj = self.TestObject(prop1='x', prop2=1234)
        with self.assertRaises(TypeError):
            serialize(test_obj)


if __name__ == '__main__':
    unittest.main()
normal
{ "blob_id": "4e4d6a9ed07aa03c79dade05e01f226017b13de5", "index": 9250, "step-1": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n <mask token>\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n <mask token>\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n <mask token>\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n\n def test_serialize_none(self):\n \"\"\"Ensure that None gets serialized to 'null'\"\"\"\n self.assertEqual(serialize(None), 'null')\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\n<mask token>\n", "step-4": "import unittest\nfrom theoktany.serializers import serialize\n\n\nclass SerializerTest(unittest.TestCase):\n\n\n class TestObject(object):\n\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def test_serialize(self):\n object_dict = {'firstName': 'Test', 'lastName': 'Test last'}\n json_str1 = '\"firstName\": \"Test\"'\n json_str2 = '\"lastName\": \"Test last\"'\n serialized_str = serialize(object_dict)\n self.assertIn(json_str1, serialized_str)\n 
self.assertIn(json_str2, serialized_str)\n\n def test_serialize_string(self):\n \"\"\"Ensure that quotes are properly escaped\"\"\"\n string = 'This is a \"string\" with \\'quotes.\\''\n json_string = '\"{}\"'.format(string.replace('\"', '\\\\\"'))\n self.assertEqual(serialize(string), json_string)\n\n def test_serialize_none(self):\n \"\"\"Ensure that None gets serialized to 'null'\"\"\"\n self.assertEqual(serialize(None), 'null')\n\n def test_serialize_object(self):\n \"\"\"Ensure that the serializer throws an error for an unserializable object\"\"\"\n test_obj = self.TestObject(prop1='x', prop2=1234)\n with self.assertRaises(TypeError):\n serialize(test_obj)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": null, "step-ids": [ 3, 4, 5, 7 ] }
[ 3, 4, 5, 7 ]
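The record above only contains the test module; the theoktany serialize implementation itself is not part of it. A minimal sketch that would satisfy all four tests, assuming serialize is a thin wrapper over the standard json module (an assumption, not the library's actual code):

import json


def serialize(obj):
    # Dicts, strings and None map straight to JSON text; json.dumps escapes
    # double quotes, renders None as 'null', and raises TypeError for objects
    # it cannot serialize, which matches what the tests expect.
    return json.dumps(obj)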
import django.dispatch

property_viewed = django.dispatch.Signal(providing_args=["property", "user", "request", "response"])
normal
{ "blob_id": "00099cab0c816c76fc0fa94d7905175feb6919cf", "index": 9795, "step-1": "<mask token>\n", "step-2": "<mask token>\nproperty_viewed = django.dispatch.Signal(providing_args=['property', 'user',\n 'request', 'response'])\n", "step-3": "import django.dispatch\nproperty_viewed = django.dispatch.Signal(providing_args=['property', 'user',\n 'request', 'response'])\n", "step-4": "import django.dispatch\n\nproperty_viewed = django.dispatch.Signal(providing_args=[\"property\",\"user\", \"request\", \"response\"])", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
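A short sketch of how a custom signal like this is typically connected and fired; the module path and model name below are made up for illustration. Note also that providing_args is deprecated since Django 3.0 and removed in 4.0, where the signal is declared simply as django.dispatch.Signal().

from django.dispatch import receiver

from realestate.signals import property_viewed  # assumed module path


@receiver(property_viewed)
def log_property_view(sender, property, user, request, response, **kwargs):
    # Receivers are called with the keyword arguments passed to send().
    print('{} viewed {}'.format(user, property))


# Typically fired from a view once the response is built, e.g.:
# property_viewed.send(sender=Property, property=prop, user=request.user,
#                      request=request, response=response)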
import asyncio

from helper import *


async def main(URL, buy_time):
    browser, page = await get_window()
    # 30s to log in
    await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')
    await asyncio.sleep(30)

    # 10s to pick the product variant
    await page.goto(URL)
    await asyncio.sleep(10)

    await sleep_time(buy_time)
    old_url = page.url

    # add to cart
    while True:
        index = 0
        try:
            print(f'重试 {index}')
            # find the "add to cart" button and click it
            await page.click('[class="btn btn-primary"]')
            break
        except:
            index += 1
            await asyncio.sleep(CLICK_FREQUENCY)

    # wait for the page to redirect
    while True:
        if page.url != old_url:
            break
        await asyncio.sleep(CLICK_FREQUENCY)

    while True:
        try:
            # find the "go to cart" button and click it
            await page.click('[class="btn btn-primary"]')
            break
        except:
            await asyncio.sleep(CLICK_FREQUENCY)
    # pay
    await asyncio.sleep(100)
    await close_window(browser)


if __name__ == '__main__':
    URL = input('宝贝链接:\n')
    buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\n')
    asyncio.run(main(URL, buy_time))
normal
{ "blob_id": "1e87f625fb7bd9f9bf4233229332c909702954a5", "index": 4334, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n", "step-3": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n", "step-4": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n # 30s登陆时间\n await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')\n await asyncio.sleep(30)\n\n # 选款式时间10s\n await page.goto(URL)\n await asyncio.sleep(10)\n\n await sleep_time(buy_time)\n old_url = page.url\n\n #加入购物车\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n # 找到“加入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n\n # 等待页面跳转\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n\n while True:\n try:\n # 找到“进入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n # 付款\n await asyncio.sleep(100)\n await close_window(browser)\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))", "step-5": 
null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
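The buying script imports get_window, close_window, sleep_time and CLICK_FREQUENCY from a helper module that is not included in this record. A minimal sketch of what such a helper could look like, assuming pyppeteer drives the browser; every name and value below is a guess, not the original module.

# helper.py -- hypothetical implementation, assuming pyppeteer as the driver.
import asyncio
import datetime

from pyppeteer import launch

# How often the main script retries clicks, in seconds (value is a guess).
CLICK_FREQUENCY = 0.1


async def get_window():
    # Open a visible browser window and return (browser, page).
    browser = await launch(headless=False)
    page = await browser.newPage()
    return browser, page


async def close_window(browser):
    await browser.close()


async def sleep_time(buy_time):
    # buy_time follows the prompt format '2020-02-06 12:55:50';
    # sleep until that moment arrives.
    target = datetime.datetime.strptime(buy_time, '%Y-%m-%d %H:%M:%S')
    delay = (target - datetime.datetime.now()).total_seconds()
    if delay > 0:
        await asyncio.sleep(delay)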
from pkg.models.board import Board


class BaseAI:
    _board: Board = None

    def __init__(self, board=None):
        if board is not None:
            self.set_board(board)

    def set_board(self, board):
        self._board = board

    def find_move(self, for_player):
        pass
normal
{ "blob_id": "b794a4cca3303ac7440e9aad7bc210df62648b51", "index": 5476, "step-1": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n <mask token>\n\n def find_move(self, for_player):\n pass\n", "step-3": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n <mask token>\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n", "step-4": "<mask token>\n\n\nclass BaseAI:\n _board: Board = None\n\n def __init__(self, board=None):\n if board is not None:\n self.set_board(board)\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n", "step-5": "from pkg.models.board import Board\n\n\nclass BaseAI:\n _board: Board = None\n\n def __init__(self, board=None):\n if board is not None:\n self.set_board(board)\n\n def set_board(self, board):\n self._board = board\n\n def find_move(self, for_player):\n pass\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
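BaseAI only defines the interface; concrete AIs are expected to subclass it and override find_move. A small sketch of such a subclass; the import path and the get_legal_moves call on the board are hypothetical, since the Board API is not shown in this record.

import random

from pkg.ai.base import BaseAI  # assumed location of the class above


class RandomAI(BaseAI):
    def find_move(self, for_player):
        # get_legal_moves is a hypothetical Board method used for illustration.
        moves = self._board.get_legal_moves(for_player)
        return random.choice(moves) if moves else None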
'''
This script will do auto-check in/out for the ZMM100 fingerprint access
control device by ZKSoftware.

At my office, the manager uses an application to load data from the
fingerprint device. After he loads the data, the log in the device's database
is cleared. So in my case, I write this script to automate checking in/out
every day.

The device runs Linux with busybox, so I have access to the ftpput, ftpget and
wget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db.
This is a sqlite3 database file. User info is in the USER_INFO table, user
transactions are in the ATT_LOG table.

Procedure:
- telnet into the device
- ftpput the database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server
- edit the ZKDB.db file on the server
- ftpget ZKDB.db from the FTP server
'''
import argparse
import datetime
import os
import random
import sqlite3
import subprocess as spr
import sys
import telnetlib


def get_server_ip(device_ip):
    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect((device_ip, 80))
    return s.getsockname()[0]


def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
    '''
    Transfer file from from_ip to to_ip via telnet.
    Use ftpput and ftpget.
    '''
    # ====FTP Server====
    try:
        import pyftpdlib
    except ImportError:
        import pip
        pip.main('install pyftpdlib'.split())

    # start pyftpdlib FTP server: anonymous with write permission, port 2121
    ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
    print('Server started')
    filename = os.path.basename(remote_file_path)

    s = telnetlib.Telnet(DEVICE_IP)
    print(s.read_until(b'login: ').decode())
    s.write(b'root \n')
    print(s.read_until(b'Password: ').decode())
    s.write(b'solokey\n')
    if s.read_until(b'#'):
        s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
        files = s.read_until(b'#').decode()

    if filename in files:
        while True:
            if cmd == 'ftpput':
                command = bytes('%s -P 2121 %s %s %s\n'
                                % (cmd, server_ip, filename, remote_file_path),
                                'utf-8')
            elif cmd == 'ftpget':
                command = bytes('%s -P 2121 %s %s %s\n'
                                % (cmd, server_ip, remote_file_path, filename),
                                'utf-8')
            else:
                raise ValueError('cmd must be `ftpput` or `ftpget`')
            s.write(command)
            ret = s.read_until(b'#').decode()
            if 'refused' not in ret:
                print(ret)
                break

    # stop pyftpdlib FTP server
    ftp_server.kill()
    print('Server killed')


def generate_verify_time(status='in', late=False):
    '''
    Generate normal verify time based on status `in` or `out`
    `in` time will be random 10 mins before 8:00
    `out` time will be random 10 mins after 17:00
    '''
    if status == 'in':
        status = 0
        if not late:
            hour = 7
            minute = random.randint(50, 59)
        else:
            hour = 8
            minute = random.randint(15, 20)
    elif status == 'out':
        status = 1
        hour = 17
        minute = random.randint(0, 10)
    else:
        raise ValueError('status must be `in` or `out`')

    second = random.randint(0, 59)
    time = datetime.time(hour, minute, second)

    return time


def add_log(uid, date, status, late=False):
    '''
    Edit ZKDB.db file, ATT_LOG table,
    insert a row which represents a check in/out log
    uid: User PIN
    date: follow format: dd/mm/yyyy - 14/01/2017
    status: 'in' is checking in, 'out' is checking out
    '''
    # verify_type: 0 is password, 1 is fingerprint
    verify_type = 1

    if status == 'in':
        status = 0
        time = generate_verify_time('in', late=late)
    elif status == 'out':
        status = 1
        time = generate_verify_time('out')
    else:
        raise ValueError('status must be `in` or `out`')

    date = datetime.datetime.strptime(date, '%d/%m/%Y')
    combined = datetime.datetime.combine(date, time)
    verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)

    with sqlite3.connect(DB) as conn:
        query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
                 'Status, Work_Code_ID, SEND_FLAG) '
                 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type,
                                                           verify_time, status,
                                                           0, 0)
        cur = conn.execute(query)
        cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
        r = cur.fetchone()

    print_log(r, uid, verify_type, verify_time, status)


def add_logs(uid, start, end, status, late=False):
    start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
    day_count = end_date - start_date
    day_count = day_count.days + 1
    for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(date)
        add_log(uid, date, status, late)


def delete_log(log_id):
    '''
    Delete a log row with ID=log_id
    '''
    with sqlite3.connect(DB) as conn:
        query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))
        conn.execute(query)
        print('Deleted log {}'.format(log_id))


def get_logs(uid, start_date, end_date):
    '''
    Returns logs of 'uid' from 'start_date' to 'end_date'
    uid: User PIN
    start_date: follow format 14/01/2017
    end_date: follow format 15/01/2017
    Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
    '''
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')

    with sqlite3.connect(DB) as conn:
        query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
                 'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))
        cur = conn.execute(query)
        rows = cur.fetchall()

    ret = []
    for row in rows:
        log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
        if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1):
            ret.append(row)
    return ret


def get_logs_by_date(uid, date):
    return get_logs(uid, date, date)


def print_log(*log_row):
    '''
    Pretty print a log row
    log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
    '''
    id, uid, verify_type, verify_time, status = log_row

    if status == 1:
        status = 'Check out'
    elif status == 0:
        status = 'Check in'
    print('{}. {} {} at {}'.format(id, uid, status, verify_time))


def check_log_row(log_row):
    '''
    Each day must have exactly 2 logs.
    One for checking in, before 8:00:00
    One for checking out, after 17:00:00
    Return True if satisfies all conditions, else False
    '''
    in_time = datetime.time(8, 0, 0)
    out_time = datetime.time(17, 0, 0)

    log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')
    status = log_row[-1]

    if status == 1 and log_date.time() < out_time:
        print('Early log on {}: {}'.format(log_date.date(), log_date))
        return False
    elif status == 0 and log_date.time() > in_time:
        print('Late log on {}: {}'.format(log_date.date(), log_date))
        return False
    else:
        return True


def check_log_by_date(uid, date):
    pass


def fix_logs(uid, start_date, end_date):
    '''
    Fix logs of uid from start_date to end_date
    A normalized log contains 2 logs per day
    One check in log before 8:00
    One check out log after 17:00
    '''
    # parse the dd/mm/yyyy strings so the date range can be iterated
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    day_count = (end_date - start_date).days + 1

    for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(date)
        logs = get_logs_by_date(uid, date)
        if len(logs) == 2:
            if not check_log_row(logs[0]) or not check_log_row(logs[1]):
                delete_log(logs[0][0])
                delete_log(logs[1][0])
                add_log(uid, date, 'in')
                add_log(uid, date, 'out')
        elif len(logs) == 0:
            add_log(uid, date, 'in')
            add_log(uid, date, 'out')
        else:
            for log in logs:
                delete_log(log[0])
            add_log(uid, date, 'in')
            add_log(uid, date, 'out')


def main():
    today = '{:%d/%m/%Y}'.format(datetime.date.today())

    parser = argparse.ArgumentParser()
    parser.add_argument('action', help='`get`, `checkin`, `checkout`, '
                        '`add` or `fix` logs', default='get')
    parser.add_argument('uids', help='User PINs', type=int, nargs='*')
    parser.add_argument('-d', '--date', help='Date', default=today)
    parser.add_argument('-r', '--range',
                        help='Range of date, ex. 01/01/2017-02/01/2017')
    parser.add_argument('--log', help='log id to delete')
    parser.add_argument('--late', help='Checkin late or not',
                        action='store_true')

    args = parser.parse_args()
    uids = args.uids
    date = args.date or today
    if not args.range:
        start, end = date, date
    else:
        start, end = args.range.split('-')

    transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')

    for uid in uids:
        if args.action == 'get':
            logs = get_logs(uid, start, end)
            for log in logs:
                print_log(*log)
        elif args.action == 'checkin':
            add_logs(uid, start, end, 'in', late=args.late)
        elif args.action == 'checkout':
            add_logs(uid, start, end, 'out')
        elif args.action == 'add':
            add_log(uid, start, end)
        elif args.action == 'fix':
            fix_logs(uid, start, end)
        elif args.action == 'delete':
            delete_log(args.log)
        else:
            raise ValueError('Action must be `get`, `checkin`, `checkout`, '
                             '`fix` or `delete`')

    transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')


if __name__ == '__main__':
    # ====config====
    DEVICE_IP = '10.0.0.204'  # todo: find IP, input IP
    DB_PATH = '/mnt/mtdblock/data/ZKDB.db'
    DB = os.path.basename(DB_PATH)
    server_ip = get_server_ip(DEVICE_IP)

    main()
normal
{ "blob_id": "3d1e6be71f92910cdc9eb2bf60ea7f8f1187f706", "index": 3698, "step-1": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\n<mask token>\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\n<mask token>\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 
01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = datetime.datetime.strptime(date, 
'%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\n<mask token>\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', 
'--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = 
datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n \"\"\"\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n with sqlite3.connect(DB) as conn:\n query = (\n 'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'\n .format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if (log_date >= start_date and log_date <= end_date + datetime.\n timedelta(days=1)):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. 
{} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 
01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = datetime.datetime.strptime(date, 
'%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n \"\"\"\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n with sqlite3.connect(DB) as conn:\n query = (\n 'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'\n .format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if (log_date >= start_date and log_date <= end_date + datetime.\n timedelta(days=1)):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. 
{} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\nif __name__ == '__main__':\n DEVICE_IP = '10.0.0.204'\n DB_PATH = '/mnt/mtdblock/data/ZKDB.db'\n DB = os.path.basename(DB_PATH)\n server_ip = get_server_ip(DEVICE_IP)\n main()\n", "step-5": "'''\nThis script will do auto-check in/out for ZMM100 fingerprint access control\ndevice by ZKSoftware.\n\nAt my office, the manager uses an application to load data from the\nfingerprint device. 
After he loads data, log in device's database is cleared.\nSo in my case, I write this script to automate checking in/out everyday.\n\nDevice is running linux with busybox, so I have access to ftpput, ftpget and\nwget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db.\nThis is a sqlite3 database file. User info is in USER_INFO, user transactions\nare in ATT_LOG table.\n\nProcedure:\n- telnet into the device\n- ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server\n- edit ZKDB.db file on server\n- ftpget ZKDB.db from FTP server\n'''\nimport argparse\nimport datetime\nimport os\nimport random\nimport sqlite3\nimport subprocess as spr\nimport sys\nimport telnetlib\n\n\ndef get_server_ip(device_ip):\n import socket\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n '''\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n '''\n\n # ====FTP Server====\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n\n # start pyftpdlib FTP server: anonymous with write permission, port 2121\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd, server_ip,\n filename,\n remote_file_path),\n 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n\n # stop pyftpdlib FTP server\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n '''\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n '''\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n\n return time\n\n\ndef add_log(uid, date, status, late=False):\n '''\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n '''\n # verify_type: 0 is password, 1 is fingerprint\n verify_type = 1\n\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n\n date = datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n\n with sqlite3.connect(DB) 
as conn:\n query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '\n 'Status, Work_Code_ID, SEND_FLAG) '\n 'VALUES ({}, {}, \"{}\", {}, 0, 0)').format(uid, verify_type,\n verify_time, status,\n 0, 0)\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n\n print_log(r, uid, verify_type, verify_time, status)\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n '''\n Delete a log row with ID=log_id\n '''\n with sqlite3.connect(DB) as conn:\n query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n '''\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n '''\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n\n with sqlite3.connect(DB) as conn:\n query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '\n 'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n '''\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n '''\n id, uid, verify_type, verify_time, status = log_row\n\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. 
{} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n '''\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n '''\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n '''\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n '''\n\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = (end_date - start_date) + 1\n\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help='`get`, `checkin`, `checkout`, '\n '`add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range',\n help='Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not',\n action='store_true')\n\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError('Action must be `get`, `checkin`, `checkout`, '\n '`fix` or `delete`')\n\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\nif __name__ == '__main__':\n # ====config====\n DEVICE_IP = '10.0.0.204' # todo: find IP, input IP\n DB_PATH = '/mnt/mtdblock/data/ZKDB.db'\n DB = os.path.basename(DB_PATH)\n server_ip = get_server_ip(DEVICE_IP)\n\n main()\n", "step-ids": [ 6, 12, 13, 14, 16 ] }
[ 6, 12, 13, 14, 16 ]
#!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
    if len(tuple_a) < 1:
        a_x = 0
    else:
        a_x = tuple_a[0]
    if len(tuple_a) < 2:
        a_y = 0
    else:
        a_y = tuple_a[1]
    if len(tuple_b) < 1:
        b_x = 0
    else:
        b_x = tuple_b[0]
    if len(tuple_b) < 2:
        b_y = 0
    else:
        b_y = tuple_b[1]
    a = a_x + b_x
    b = a_y + b_y
    tuple_c = (a, b)
    return tuple_c
normal
{ "blob_id": "1522ebb52504f7f27a526b597fe1e262bbcbfbb0", "index": 4429, "step-1": "<mask token>\n", "step-2": "def add_tuple(tuple_a=(), tuple_b=()):\n if len(tuple_a) < 1:\n a_x = 0\n else:\n a_x = tuple_a[0]\n if len(tuple_a) < 2:\n a_y = 0\n else:\n a_y = tuple_a[1]\n if len(tuple_b) < 1:\n b_x = 0\n else:\n b_x = tuple_b[0]\n if len(tuple_b) < 2:\n b_y = 0\n else:\n b_y = tuple_b[1]\n a = a_x + b_x\n b = a_y + b_y\n tuple_c = a, b\n return tuple_c\n", "step-3": "#!/usr/bin/python3\ndef add_tuple(tuple_a=(), tuple_b=()):\n if len(tuple_a) < 1:\n a_x = 0\n else:\n a_x = tuple_a[0]\n if len(tuple_a) < 2:\n a_y = 0\n else:\n a_y = tuple_a[1]\n if len(tuple_b) < 1:\n b_x = 0\n else:\n b_x = tuple_b[0]\n if len(tuple_b) < 2:\n b_y = 0\n else:\n b_y = tuple_b[1]\n a = a_x + b_x\n b = a_y + b_y\n tuple_c = (a, b)\n return tuple_c\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# from cis_dna import
import cis_config as conf
import protocol_pb2 as proto
import uuid
import random
import math
import dna_decoding
import numpy as np


def move(cell):
    pass


def is_alive(cell):
    starvation_threshold = conf.ENERGY_THRESHOLD
    if cell.energy_level < starvation_threshold:
        return False
    else:
        return True


def builds_connection_after_division(cell):
    return dna_decoding.builds_connection_after_division(
        cell.dna, len(cell.connections))


def dna_copy_or_sub_slice(cell):
    if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):
        return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))
    return cell.dna


def divide(cell):
    initial_energy = int(dna_decoding.initial_energy(cell.dna))
    cost = conf.DIVISION_ENERGY_COST + initial_energy
    if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:
        cell.energy_level -= cost

        child_id = str(uuid.uuid1())
        child_connections = []

        if builds_connection_after_division(cell):
            child_connections.append(proto.Connection(connected_to=cell.id))
            conn = cell.connections.add()
            conn.connected_to = child_id

        child_dna = dna_decoding.mutate_dna_with_chance(
            dna_copy_or_sub_slice(cell),
            conf.MUTATION_CHANCE
        )

        new_cell = proto.Cell(
            id=child_id,
            energy_level=initial_energy,
            pos=randomly_shifted_pos(cell.pos, 10),
            vel=proto.Vector(
                x=0,
                y=0,
                z=0),
            dna=child_dna,
            connections=child_connections)
        return new_cell


def randomly_shifted_pos(pos, shift_dist):
    d_x, d_y, d_z = random_vector_of_length(shift_dist)
    return proto.Vector(
        x=pos.x + d_x,
        y=pos.y + d_y,
        z=pos.z + d_z,
    )


def random_vector_of_length(l):
    vec = np.random.uniform(1 / 10 * 6, 2, [3]) - 1
    dist = np.sqrt(vec.dot(vec))
    factor = l / dist
    return vec
normal
{ "blob_id": "de557c3c1455acc0a3facfca5729a010f3d123dc", "index": 4208, "step-1": "<mask token>\n\n\ndef is_alive(cell):\n starvation_threshold = conf.ENERGY_THRESHOLD\n if cell.energy_level < starvation_threshold:\n return False\n else:\n return True\n\n\n<mask token>\n\n\ndef dna_copy_or_sub_slice(cell):\n if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):\n return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))\n return cell.dna\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef move(cell):\n pass\n\n\ndef is_alive(cell):\n starvation_threshold = conf.ENERGY_THRESHOLD\n if cell.energy_level < starvation_threshold:\n return False\n else:\n return True\n\n\ndef builds_connection_after_division(cell):\n return dna_decoding.builds_connection_after_division(cell.dna, len(cell\n .connections))\n\n\ndef dna_copy_or_sub_slice(cell):\n if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):\n return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))\n return cell.dna\n\n\ndef divide(cell):\n initial_energy = int(dna_decoding.initial_energy(cell.dna))\n cost = conf.DIVISION_ENERGY_COST + initial_energy\n if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:\n cell.energy_level -= cost\n child_id = str(uuid.uuid1())\n child_connections = []\n if builds_connection_after_division(cell):\n child_connections.append(proto.Connection(connected_to=cell.id))\n conn = cell.connections.add()\n conn.connected_to = child_id\n child_dna = dna_decoding.mutate_dna_with_chance(dna_copy_or_sub_slice\n (cell), conf.MUTATION_CHANCE)\n new_cell = proto.Cell(id=child_id, energy_level=initial_energy, pos\n =randomly_shifted_pos(cell.pos, 10), vel=proto.Vector(x=0, y=0,\n z=0), dna=child_dna, connections=child_connections)\n return new_cell\n\n\n<mask token>\n\n\ndef random_vector_of_length(l):\n vec = np.random.uniform(1 / 10 * 6, 2, [3]) - 1\n dist = np.sqrt(vec.dot(vec))\n factor = l / dist\n return vec\n", "step-3": "<mask token>\n\n\ndef move(cell):\n pass\n\n\ndef is_alive(cell):\n starvation_threshold = conf.ENERGY_THRESHOLD\n if cell.energy_level < starvation_threshold:\n return False\n else:\n return True\n\n\ndef builds_connection_after_division(cell):\n return dna_decoding.builds_connection_after_division(cell.dna, len(cell\n .connections))\n\n\ndef dna_copy_or_sub_slice(cell):\n if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):\n return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))\n return cell.dna\n\n\ndef divide(cell):\n initial_energy = int(dna_decoding.initial_energy(cell.dna))\n cost = conf.DIVISION_ENERGY_COST + initial_energy\n if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:\n cell.energy_level -= cost\n child_id = str(uuid.uuid1())\n child_connections = []\n if builds_connection_after_division(cell):\n child_connections.append(proto.Connection(connected_to=cell.id))\n conn = cell.connections.add()\n conn.connected_to = child_id\n child_dna = dna_decoding.mutate_dna_with_chance(dna_copy_or_sub_slice\n (cell), conf.MUTATION_CHANCE)\n new_cell = proto.Cell(id=child_id, energy_level=initial_energy, pos\n =randomly_shifted_pos(cell.pos, 10), vel=proto.Vector(x=0, y=0,\n z=0), dna=child_dna, connections=child_connections)\n return new_cell\n\n\ndef randomly_shifted_pos(pos, shift_dist):\n d_x, d_y, d_z = random_vector_of_length(shift_dist)\n return proto.Vector(x=pos.x + d_x, y=pos.y + d_y, z=pos.z + d_z)\n\n\ndef random_vector_of_length(l):\n vec = np.random.uniform(1 / 
10 * 6, 2, [3]) - 1\n dist = np.sqrt(vec.dot(vec))\n factor = l / dist\n return vec\n", "step-4": "import cis_config as conf\nimport protocol_pb2 as proto\nimport uuid\nimport random\nimport math\nimport dna_decoding\nimport numpy as np\n\n\ndef move(cell):\n pass\n\n\ndef is_alive(cell):\n starvation_threshold = conf.ENERGY_THRESHOLD\n if cell.energy_level < starvation_threshold:\n return False\n else:\n return True\n\n\ndef builds_connection_after_division(cell):\n return dna_decoding.builds_connection_after_division(cell.dna, len(cell\n .connections))\n\n\ndef dna_copy_or_sub_slice(cell):\n if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):\n return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))\n return cell.dna\n\n\ndef divide(cell):\n initial_energy = int(dna_decoding.initial_energy(cell.dna))\n cost = conf.DIVISION_ENERGY_COST + initial_energy\n if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:\n cell.energy_level -= cost\n child_id = str(uuid.uuid1())\n child_connections = []\n if builds_connection_after_division(cell):\n child_connections.append(proto.Connection(connected_to=cell.id))\n conn = cell.connections.add()\n conn.connected_to = child_id\n child_dna = dna_decoding.mutate_dna_with_chance(dna_copy_or_sub_slice\n (cell), conf.MUTATION_CHANCE)\n new_cell = proto.Cell(id=child_id, energy_level=initial_energy, pos\n =randomly_shifted_pos(cell.pos, 10), vel=proto.Vector(x=0, y=0,\n z=0), dna=child_dna, connections=child_connections)\n return new_cell\n\n\ndef randomly_shifted_pos(pos, shift_dist):\n d_x, d_y, d_z = random_vector_of_length(shift_dist)\n return proto.Vector(x=pos.x + d_x, y=pos.y + d_y, z=pos.z + d_z)\n\n\ndef random_vector_of_length(l):\n vec = np.random.uniform(1 / 10 * 6, 2, [3]) - 1\n dist = np.sqrt(vec.dot(vec))\n factor = l / dist\n return vec\n", "step-5": "# from cis_dna import\nimport cis_config as conf\nimport protocol_pb2 as proto\nimport uuid\nimport random\nimport math\nimport dna_decoding\nimport numpy as np\n\n\ndef move(cell):\n pass\n\n\ndef is_alive(cell):\n starvation_threshold = conf.ENERGY_THRESHOLD\n if cell.energy_level < starvation_threshold:\n return False\n else:\n return True\n\n\ndef builds_connection_after_division(cell):\n return dna_decoding.builds_connection_after_division(\n cell.dna, len(cell.connections))\n\n\ndef dna_copy_or_sub_slice(cell):\n if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):\n return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))\n return cell.dna\n\n\ndef divide(cell):\n initial_energy = int(dna_decoding.initial_energy(cell.dna))\n cost = conf.DIVISION_ENERGY_COST + initial_energy\n if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:\n cell.energy_level -= cost\n\n child_id = str(uuid.uuid1())\n child_connections = []\n\n if builds_connection_after_division(cell):\n child_connections.append(proto.Connection(connected_to=cell.id))\n conn = cell.connections.add()\n conn.connected_to = child_id\n\n child_dna = dna_decoding.mutate_dna_with_chance(\n dna_copy_or_sub_slice(cell),\n conf.MUTATION_CHANCE\n )\n\n new_cell = proto.Cell(\n id=child_id,\n energy_level=initial_energy,\n pos=randomly_shifted_pos(cell.pos, 10),\n vel=proto.Vector(\n x=0,\n y=0,\n z=0),\n dna=child_dna,\n connections=child_connections)\n return new_cell\n\n\ndef randomly_shifted_pos(pos, shift_dist):\n d_x, d_y, d_z = random_vector_of_length(shift_dist)\n return proto.Vector(\n x=pos.x + d_x,\n y=pos.y + d_y,\n z=pos.z + d_z,\n 
)\n\n\ndef random_vector_of_length(l):\n vec = np.random.uniform(1 / 10 * 6, 2, [3]) - 1\n dist = np.sqrt(vec.dot(vec))\n factor = l / dist\n return vec\n", "step-ids": [ 2, 6, 7, 8, 9 ] }
[ 2, 6, 7, 8, 9 ]
#!/usr/bin/env python
# coding=utf-8
import sys,os

dir = '/home/ellen/yjoqm/fdfs_client/pic'

def scp_file(filename):
    cmd = 'scp [email protected]:/home/ellen/yjoqm/fdfs_client/pic/%s .' %filename
    os.system(cmd)

def main(args):
    args = sys.argv[1]

    scp_file(args)
    print 'done~~~~'

if __name__ == '__main__':
    args = sys.argv
    if len(args) < 1:
        print 'usage: python scp_file xxxx'
        sys.exit(2)
    main(args)
normal
{ "blob_id": "e2489f9d3041c45129fdd71da6652a6093c96d2d", "index": 8487, "step-1": "#!/usr/bin/env python\n# coding=utf-8\nimport sys,os\n\ndir = '/home/ellen/yjoqm/fdfs_client/pic'\n\ndef scp_file(filename):\n cmd = 'scp [email protected]:/home/ellen/yjoqm/fdfs_client/pic/%s .' %filename\n os.system(cmd)\n\ndef main(args):\n args = sys.argv[1]\n \n scp_file(args)\n print 'done~~~~'\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) < 1:\n print 'usage: python scp_file xxxx'\n sys.exit(2)\n main(args)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import torch
import re
import sys
import os
import shutil
import filecmp
import numpy as np
from collections import defaultdict
from shutil import copyfile

sys.path.append('../')


class BoardParser:
    def __init__(self):

        self.file = open('../board_output', 'rb')

        self.data = None

    def update(self):

        s = self.file.read()

        if len(s) == 200:
            self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)

        self.file.seek(0)


class StatusParser:
    def __init__(self):

        self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))
        self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))
        self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))
        self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))
        self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))


class Parser:
    def __init__(self, filename):

        self.filename = filename

        self.last_update = -1

    def check_update(self):

        latest_update = os.path.getmtime(self.filename)

        if latest_update > self.last_update:
            self.last_update = latest_update
            self.parse()
            return True
        return False

    def parse(self):
        score_re = 'Episode:\s*(?P<episode>\d*)\s*' \
                   'Score:\s*(?P<score>\d*)\s*' \
                   'Lines Cleared:\s*(?P<lines>\d*)'
        train_re = 'Iteration:\s*(?P<iter>\d*)\s*' \
                   'training loss:\s*(?P<t_loss>\d*\.\d*)\s*' \
                   'validation loss:\s*(?P<v_loss>\d*\.\d*)±\s*(?P<v_loss_err>\d*\.\d*|nan)\s*' \
                   'gradient norm:\s*(?P<g_norm>\d*\.\d*)'
        datasize_re = 'Training data size:\s*(?P<tsize>\d*)\s*' \
                      'Validation data size:\s*(?P<vsize>\d*)'
        queue_re = 'Memory usage: (?P<filled>\d*) / (?P<size>\d*).*'

        self.data = defaultdict(list)
        size = 0
        filled = 0
        rm_since_last_game = 0

        with open(self.filename) as f:
            lc_avg_tmp = []
            sc_avg_tmp = []
            data_accum = 0
            training = False
            for line in f.readlines():
                match_score_re = re.search(score_re, line)
                match_train_re = re.search(train_re, line)
                match_datasize_re = re.search(datasize_re, line)
                match_queue_re = re.search(queue_re, line)
                if match_score_re:
                    d = match_score_re.groupdict()
                    lc = int(d['lines'])
                    sc = int(d['score'])
                    self.data['line_cleared'].append(lc)
                    self.data['score'].append(sc)
                    self.data['data_accumulated'].append(data_accum)
                    lc_avg_tmp.append(lc)
                    sc_avg_tmp.append(sc)
                    rm_since_last_game = 0
                elif match_train_re:
                    d = match_train_re.groupdict()
                    self.data['training_loss'].append(float(d['t_loss']))
                    self.data['validation_loss'].append(float(d['v_loss']))
                    if d['v_loss_err'] == 'nan':
                        self.data['validation_loss_err'].append(0)
                    else:
                        self.data['validation_loss_err'].append(float(d['v_loss_err']))
                    self.data['g_norm'].append(float(d['g_norm']))
                    #print(d['g_norm'])
                elif match_datasize_re:
                    d = match_datasize_re.groupdict()
                    tsize = int(d['tsize'])
                    vsize = int(d['vsize'])
                    data_accum += (tsize + vsize)
                elif match_queue_re:
                    d = match_queue_re.groupdict()
                    filled = int(d['filled'])
                    size = int(d['size'])
                elif 'REMOVING UNUSED' in line:
                    rm_since_last_game += 1
                elif 'proceed to training' in line:
                    training = True
                    if lc_avg_tmp:
                        mean = np.average(lc_avg_tmp)
                        std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
                        self.data['line_cleared_per_train'].append((mean, std))
                        lc_avg_tmp.clear()
                    else:
                        if self.data['line_cleared_per_train']:
                            self.data['line_cleared_per_train'].append(
                                self.data['line_cleared_per_train'][-1])
                        else:
                            self.data['line_cleared_per_train'].append((0, 0))
                    if sc_avg_tmp:
                        mean = np.average(sc_avg_tmp)
                        std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
                        self.data['score_per_train'].append((mean, std))
                        sc_avg_tmp.clear()
                    else:
                        if self.data['score_per_train']:
                            self.data['score_per_train'].append(
                                self.data['score_per_train'][-1])
                        else:
                            self.data['score_per_train'].append((0, 0))
                elif 'Training complete' in line:
                    training = False
            if lc_avg_tmp:
                mean = np.average(lc_avg_tmp)
                std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
                self.data['line_cleared_per_train'].append((mean, std))
            if sc_avg_tmp:
                mean = np.average(sc_avg_tmp)
                std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
                self.data['score_per_train'].append((mean, std))

        if not training:
            flocal = './model_checkpoint'
            ftarget = '../pytorch_model/model_checkpoint'

            ex_local = os.path.isfile(flocal)
            ex_target = os.path.isfile(ftarget)

            if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):
                copyfile(ftarget, flocal)

        self.data['filled'] = filled
        self.data['size'] = size
        self.data['rm_since_last_game'] = rm_since_last_game


class ModelParser:
    def __init__(self, distributional=True):

        self.last_update = -1

        self.data = {}

        self.distributional = distributional

    def check_update(self):
        flocal = './model_checkpoint'
        if os.path.isfile(flocal):
            latest = os.path.getmtime(flocal)
            if latest > self.last_update:
                print('New model found, updating...', flush=True)
                self.last_update = latest
                state = torch.load(flocal, map_location=torch.device('cpu'))
                model_state = state['model_state_dict']
                self.parse_state(model_state)
                return True
        return False

    def parse(self, model):
        self.parse_state(model.state_dict())

    def parse_state(self, model_state):
        self.data = {}
        for k, v in model_state.items():
            if 'weight' in k:
                k = k.replace('.weight', '')
                k = k.replace('seq.', '')
                self.data[k] = v.cpu().numpy().ravel()
normal
{ "blob_id": "3668e8009dca4ea261bdfbd325331c338fdac5a9", "index": 627, "step-1": "<mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 
'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-2": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n <mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if 
match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-3": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n 
self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n 
elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-4": "<mask token>\nsys.path.append('../')\n\n\nclass BoardParser:\n\n def __init__(self):\n self.file = open('../board_output', 'rb')\n self.data = None\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n 
match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-5": "import torch\nimport re\nimport sys\nimport 
os\nimport shutil\nimport filecmp\nimport numpy as np\nfrom collections import defaultdict\nfrom shutil import copyfile\n\nsys.path.append('../')\n\n\nclass BoardParser:\n def __init__(self):\n\n self.file = open('../board_output', 'rb')\n\n self.data = None\n\n def update(self):\n\n s = self.file.read()\n\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n\n self.file.seek(0)\n\n\nclass StatusParser:\n def __init__(self):\n\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))\n\n\nclass Parser:\n def __init__(self, filename):\n\n self.filename = filename\n\n self.last_update = -1\n\n def check_update(self):\n\n latest_update = os.path.getmtime(self.filename)\n\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = 'Episode:\\s*(?P<episode>\\d*)\\s*' \\\n 'Score:\\s*(?P<score>\\d*)\\s*' \\\n 'Lines Cleared:\\s*(?P<lines>\\d*)'\n train_re = 'Iteration:\\s*(?P<iter>\\d*)\\s*' \\\n 'training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*' \\\n 'validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*' \\\n 'gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'\n datasize_re = 'Training data size:\\s*(?P<tsize>\\d*)\\s*' \\\n 'Validation data size:\\s*(?P<vsize>\\d*)'\n queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'\n\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d['v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n #print(d['g_norm'])\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += (tsize + vsize)\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n else:\n if self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(\n self.data['line_cleared_per_train'][-1])\n else:\n 
self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n else:\n if self.data['score_per_train']:\n self.data['score_per_train'].append(\n self.data['score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n\n if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):\n copyfile(ftarget, flocal)\n\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n def __init__(self, distributional=True):\n\n self.last_update = -1\n\n self.data = {}\n\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n", "step-ids": [ 11, 12, 13, 15, 17 ] }
[ 11, 12, 13, 15, 17 ]
"""main.py""" import tkinter as tk from tkinter import ttk from ttkthemes import ThemedStyle import wikipedia as wk from newsapi import NewsApiClient as nac import datetime import random class MainWindow: """Application controller object.""" def __init__(self): self.p = None self.main_page = tk.Tk() self.main_page.title("MetaWikipedia") self.main_page.geometry("500x500") self.style = ThemedStyle(self.main_page) self.style.set_theme("scidblue") self.left_pane = ttk.PanedWindow(self.main_page) self.right_pane = ttk.PanedWindow(self.main_page) # Left pane self.search = ttk.Button(self.left_pane, text="Search", command=self.search_wikipedia) self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5) self.randomize_but = ttk.Button(self.left_pane, text="Randomize", command=self.randomize) self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5) self.search_box = tk.Text(self.left_pane) self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1) self.summary = tk.Text(self.left_pane, wrap=tk.WORD) self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1) extra_list_choices = ["none", "categories", "pageid", "sections", "html"] self.extra_list_choice = tk.StringVar() self.extra_list_choice.set("none") self.extra_list = ttk.OptionMenu( self.left_pane, self.extra_list_choice, *extra_list_choices, command=self.update_choice ) self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1) self.other_text = tk.Text(self.left_pane) self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1) # Right pane self.api_key_label = ttk.Label(self.right_pane, text="API Key") self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4) self.api_key_entry = ttk.Entry(self.right_pane, text="ABC...") self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6) self.news_box = tk.Text(self.right_pane) self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1) self.top_categories_label = ttk.Label(self.right_pane, text="Top Categories") self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1) self.top_categories = tk.Text(self.right_pane) self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1) self.category_map = {} self.randomize() self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5) self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5) self.main_page.mainloop() def search_wikipedia(self): """Safely browse wikipedia articles.""" self.summary.delete('1.0', tk.END) possibilities = wk.search(self.search_box.get('1.0',tk.END).replace("\n","")) if len(possibilities) > 0: try: p = wk.page(possibilities[0]) except wk.DisambiguationError as e: p = wk.page(e.options[0]) self.summary.configure(state="normal") self.summary.delete('1.0', tk.END) self.summary.insert('1.0', p.summary) self.summary.configure(state="disabled") self.p = p self.update_category_map(p.categories) self.get_news() return None def update_choice(self, value): """Update box based on menu choice.""" if self.p is not None: if value == "none": self.other_text.delete('1.0', tk.END) self.other_text.insert('1.0', "") if value == "categories": self.other_text.delete('1.0', tk.END) self.other_text.insert('1.0', self.p.categories) if value == "pageid": self.other_text.delete('1.0', tk.END) self.other_text.insert('1.0', self.p.pageid) if value == "sections": self.other_text.delete('1.0', tk.END) self.other_text.insert('1.0', self.p.sections) if value == "html": self.other_text.delete('1.0', tk.END) self.other_text.insert('1.0', self.p.html()) def randomize(self): 
"""Randomize wikipedia article.""" self.search_box.delete('1.0', tk.END) self.search_box.insert('1.0', wk.random()) self.search_wikipedia() def update_category_map(self, category_list): """Update the category map after a search.""" for category in category_list: skip = False for i in ["wiki", "sources", "article", "stub", "wayback", "cs1"]: if i in category.lower(): skip = True if skip: continue if category in self.category_map: self.category_map[category] += 1 else: self.category_map[category] = 1 self.update_top_categories() def update_top_categories(self): """Update the top categories text box.""" cats = self.sorted_categories() display = "" for cat in cats: hit = "hits" if self.category_map[cat] > 1 else "hit" display += f"{cat}, {self.category_map[cat]} {hit}\n" self.top_categories.configure(state="normal") self.top_categories.delete('1.0', tk.END) self.top_categories.insert('1.0', display) self.top_categories.configure(state="disabled") def sorted_categories(self): """Sort categories by hits.""" count = lambda category: self.category_map[category] l = sorted(self.category_map, key=count, reverse=True) if len(l) > 5: return l[:5] else: return l def get_news(self): """Get news using News API.""" if self.api_key_entry.get() == "": return None api = nac(api_key=self.api_key_entry.get()) now = datetime.datetime.utcnow() two_weeks = (now-datetime.timedelta(days=14)) #today = now.strftime() query = "" for cat in self.sorted_categories(): query += f"{cat}," search = api.get_top_headlines(q=query, sources="bbc-news,the-verge", language="en") news = "" for article in search["articles"]: news += f"{search['articles'][article]['title']}\n" self.news_box.delete('1.0', tk.END) self.news_box.insert('1.0', news) if __name__ == "__main__": main_window = MainWindow()
normal
{ "blob_id": "874fa927a1c0f1beeb31ca7b0de7fd2b16218ea4", "index": 2756, "step-1": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n <mask token>\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n 
self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a 
search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n 
self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ 
== '__main__':\n main_window = MainWindow()\n", "step-4": "<mask token>\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', 
self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ == '__main__':\n main_window = MainWindow()\n", "step-5": "\"\"\"main.py\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n \n def __init__(self):\n self.p = None\n \n self.main_page = tk.Tk()\n self.main_page.title(\"MetaWikipedia\")\n self.main_page.geometry(\"500x500\")\n\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme(\"scidblue\")\n\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n\n # Left pane\n self.search = ttk.Button(self.left_pane, text=\"Search\", command=self.search_wikipedia)\n self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5)\n\n self.randomize_but = ttk.Button(self.left_pane, text=\"Randomize\", command=self.randomize)\n self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5)\n\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1)\n\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1)\n\n extra_list_choices = [\"none\", \"categories\", \"pageid\", \"sections\", \"html\"]\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set(\"none\")\n 
self.extra_list = ttk.OptionMenu(\n self.left_pane,\n self.extra_list_choice,\n *extra_list_choices,\n command=self.update_choice\n )\n self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1)\n\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1)\n\n\n # Right pane\n self.api_key_label = ttk.Label(self.right_pane, text=\"API Key\")\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4)\n\n self.api_key_entry = ttk.Entry(self.right_pane, text=\"ABC...\")\n self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6)\n\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1)\n\n self.top_categories_label = ttk.Label(self.right_pane, text=\"Top Categories\")\n self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1)\n\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1)\n\n self.category_map = {}\n\n self.randomize()\n \n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0',tk.END).replace(\"\\n\",\"\"))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state=\"normal\")\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state=\"disabled\")\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == \"none\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', \"\")\n if value == \"categories\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == \"pageid\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == \"sections\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == \"html\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in [\"wiki\", \"sources\", \"article\", \"stub\",\n \"wayback\", \"cs1\"]:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = \"\"\n for cat in cats:\n hit = \"hits\" if self.category_map[cat] > 1 else \"hit\"\n display += f\"{cat}, {self.category_map[cat]} {hit}\\n\"\n self.top_categories.configure(state=\"normal\")\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n 
self.top_categories.configure(state=\"disabled\")\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == \"\":\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = (now-datetime.timedelta(days=14))\n #today = now.strftime()\n query = \"\"\n for cat in self.sorted_categories():\n query += f\"{cat},\"\n search = api.get_top_headlines(q=query,\n sources=\"bbc-news,the-verge\",\n language=\"en\")\n news = \"\"\n for article in search[\"articles\"]:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n\nif __name__ == \"__main__\":\n main_window = MainWindow()\n", "step-ids": [ 8, 9, 11, 12, 13 ] }
[ 8, 9, 11, 12, 13 ]
import simplejson as json json_list = [ "/content/squash-generation/squash/final/Custom.json", "/content/squash-generation/squash/temp/Custom/final_qa_set.json", "/content/squash-generation/squash/temp/Custom/generated_questions.json", "/content/squash-generation/squash/temp/Custom/nbest_predictions.json", "/content/squash-generation/squash/temp/Custom/null_odds.json", "/content/squash-generation/squash/temp/Custom/predictions.json" ] for i in json_list: with open(i,) as f: obj = json.load(f) f.close() outfile = open(i, "w") outfile.write(json.dumps(obj, indent=4, sort_keys=True)) outfile.close()
normal
{ "blob_id": "f37d016dc49820239eb42198ca922e8681a2e0a6", "index": 6929, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n", "step-3": "<mask token>\njson_list = ['/content/squash-generation/squash/final/Custom.json',\n '/content/squash-generation/squash/temp/Custom/final_qa_set.json',\n '/content/squash-generation/squash/temp/Custom/generated_questions.json',\n '/content/squash-generation/squash/temp/Custom/nbest_predictions.json',\n '/content/squash-generation/squash/temp/Custom/null_odds.json',\n '/content/squash-generation/squash/temp/Custom/predictions.json']\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n", "step-4": "import simplejson as json\njson_list = ['/content/squash-generation/squash/final/Custom.json',\n '/content/squash-generation/squash/temp/Custom/final_qa_set.json',\n '/content/squash-generation/squash/temp/Custom/generated_questions.json',\n '/content/squash-generation/squash/temp/Custom/nbest_predictions.json',\n '/content/squash-generation/squash/temp/Custom/null_odds.json',\n '/content/squash-generation/squash/temp/Custom/predictions.json']\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n", "step-5": "import simplejson as json\n\njson_list = [ \"/content/squash-generation/squash/final/Custom.json\",\n \"/content/squash-generation/squash/temp/Custom/final_qa_set.json\", \n \"/content/squash-generation/squash/temp/Custom/generated_questions.json\",\n \"/content/squash-generation/squash/temp/Custom/nbest_predictions.json\", \n \"/content/squash-generation/squash/temp/Custom/null_odds.json\",\n \"/content/squash-generation/squash/temp/Custom/predictions.json\" ]\n\nfor i in json_list:\n with open(i,) as f:\n obj = json.load(f)\n f.close() \n outfile = open(i, \"w\")\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
n = input("Enter a number: ") def fact(num): factorial = 1 if int(num) >= 1: for i in range (1,int(n)+1): factorial = factorial * i return factorial print(fact(n))
normal
{ "blob_id": "93b00b5c1bec38d2a4ac109f1533d3c0d9e99044", "index": 5763, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef fact(num):\n factorial = 1\n if int(num) >= 1:\n for i in range(1, int(n) + 1):\n factorial = factorial * i\n return factorial\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef fact(num):\n factorial = 1\n if int(num) >= 1:\n for i in range(1, int(n) + 1):\n factorial = factorial * i\n return factorial\n\n\nprint(fact(n))\n", "step-4": "n = input('Enter a number: ')\n\n\ndef fact(num):\n factorial = 1\n if int(num) >= 1:\n for i in range(1, int(n) + 1):\n factorial = factorial * i\n return factorial\n\n\nprint(fact(n))\n", "step-5": "n = input(\"Enter a number: \")\n\ndef fact(num):\n factorial = 1\n if int(num) >= 1:\n for i in range (1,int(n)+1):\n factorial = factorial * i\n return factorial\n\nprint(fact(n))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import subprocess class Audio: def __init__(self): self.sox_process = None def kill_sox(self, timeout=1): if self.sox_process is not None: self.sox_process.terminate() try: self.sox_process.wait(timeout=timeout) except subprocess.TimeoutExpired: self.sox_process.kill() self.sox_process.wait(timeout=timeout) self.sox_process = None # trying a lower buffer size def run_sox(self, scale, preset, buffer=20): ''' Builds and returns a sox command from a preset object ''' buffer = 17 multiplier = 100 command_effects = [] command_effects += ["pitch", str(scale * multiplier)] # Volume boosting if preset.volume_boost != None: command_effects += ["vol", str(preset.volume_boost) + "dB"] else: # Fix a bug where SoX uses last given volumne command_effects += ["vol", "0"] # Downsampling if preset.downsample_amount != None: command_effects += ["downsample", str(preset.downsample_amount)] else: # Append downsample of 1 to fix a bug where the downsample isn't being reverted # when we disable the effect with it on. command_effects += ["downsample", "1"] command = ["sox", "--buffer", str(buffer), "-q", "-t", "pulseaudio", "default", "-t", "pulseaudio", "Lyrebird-Output"] + command_effects self.sox_process = subprocess.Popen(command) def get_sink_name(self, tuple): if tuple[0] == "sink_name": return tuple[1] elif tuple[0] == "source_name": return tuple[1] else: return None def load_pa_modules(self): self.null_sink = subprocess.check_call( 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'.split(' ') ) self.remap_sink = subprocess.check_call( 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'\ .split(' ') ) def get_pactl_modules(self): ''' Parses `pactl info short` into tuples containing the module ID, the module type and the attributes of the module. It is designed only for named modules and as such junk data may be included in the returned list. Returns an array of tuples that take the form: (module_id (str), module_type (str), attributes (attribute tuples)) The attribute tuples: (key (str), value (str)) An example output might look like: [ ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ), ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] ) ] ''' pactl_list = subprocess.run(["pactl", "list", "short"], capture_output=True, encoding="utf8") lines = pactl_list.stdout data = [] split_lines = lines.split("\n") for line in split_lines: info = line.split("\t") if len(info) <= 2: continue if info[2] and len(info[2]) > 0: key_values = list(map(lambda key_value: tuple(key_value.split("=")), info[2].split(" "))) data.append((info[0], info[1], key_values)) else: data.append((info[0], info[1], [])) return data def unload_pa_modules(self): ''' Unloads all Lyrebird null sinks. ''' modules = self.get_pactl_modules() lyrebird_module_ids = [] for module in modules: if len(module) < 3: continue; if len(module[2]) < 1: continue; if module[1] == "module-null-sink": sink_name = self.get_sink_name(module[2][0]) if sink_name == "Lyrebird-Output": lyrebird_module_ids.append(module[0]) elif module[1] == "module-remap-source": sink_name = self.get_sink_name(module[2][0]) if sink_name == "Lyrebird-Input": lyrebird_module_ids.append(module[0]) for id in lyrebird_module_ids: subprocess.run(["pactl", "unload-module", str(id)])
normal
{ "blob_id": "d35d26cc50da9a3267edd2da706a4b6e653d22ac", "index": 6555, "step-1": "<mask token>\n\n\nclass Audio:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n <mask token>\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n", "step-4": "import subprocess\n\n\nclass Audio:\n\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n def run_sox(self, scale, preset, buffer=20):\n \"\"\"\n Builds and returns a sox command from a preset object\n \"\"\"\n buffer = 17\n multiplier = 100\n command_effects = []\n command_effects += ['pitch', str(scale * multiplier)]\n if preset.volume_boost != None:\n command_effects += ['vol', str(preset.volume_boost) + 'dB']\n else:\n command_effects += ['vol', '0']\n if preset.downsample_amount != None:\n command_effects += ['downsample', str(preset.downsample_amount)]\n else:\n command_effects += ['downsample', '1']\n command = ['sox', '--buffer', str(buffer), '-q', '-t', 'pulseaudio',\n 'default', '-t', 'pulseaudio', 'Lyrebird-Output'] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == 'sink_name':\n return tuple[1]\n elif tuple[0] == 'source_name':\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'\n .split(' '))\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird Virtual Input\"'\n .split(' '))\n\n def get_pactl_modules(self):\n \"\"\"\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. 
It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n \"\"\"\n pactl_list = subprocess.run(['pactl', 'list', 'short'],\n capture_output=True, encoding='utf8')\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split('\\n')\n for line in split_lines:\n info = line.split('\\t')\n if len(info) <= 2:\n continue\n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.\n split('=')), info[2].split(' ')))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n \"\"\"\n Unloads all Lyrebird null sinks.\n \"\"\"\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue\n if len(module[2]) < 1:\n continue\n if module[1] == 'module-null-sink':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Output':\n lyrebird_module_ids.append(module[0])\n elif module[1] == 'module-remap-source':\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == 'Lyrebird-Input':\n lyrebird_module_ids.append(module[0])\n for id in lyrebird_module_ids:\n subprocess.run(['pactl', 'unload-module', str(id)])\n", "step-5": "import subprocess\n\nclass Audio:\n def __init__(self):\n self.sox_process = None\n\n def kill_sox(self, timeout=1):\n if self.sox_process is not None:\n self.sox_process.terminate()\n try:\n self.sox_process.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.sox_process.kill()\n self.sox_process.wait(timeout=timeout)\n self.sox_process = None\n\n # trying a lower buffer size\n def run_sox(self, scale, preset, buffer=20):\n '''\n Builds and returns a sox command from a preset object\n '''\n buffer = 17\n multiplier = 100\n command_effects = []\n\n command_effects += [\"pitch\", str(scale * multiplier)]\n\n # Volume boosting\n if preset.volume_boost != None:\n command_effects += [\"vol\", str(preset.volume_boost) + \"dB\"]\n else:\n # Fix a bug where SoX uses last given volumne\n command_effects += [\"vol\", \"0\"]\n\n # Downsampling\n if preset.downsample_amount != None:\n command_effects += [\"downsample\", str(preset.downsample_amount)]\n else:\n # Append downsample of 1 to fix a bug where the downsample isn't being reverted\n # when we disable the effect with it on.\n command_effects += [\"downsample\", \"1\"]\n\n command = [\"sox\", \"--buffer\", str(buffer), \"-q\", \"-t\", \"pulseaudio\", \"default\", \"-t\", \"pulseaudio\", \"Lyrebird-Output\"] + command_effects\n self.sox_process = subprocess.Popen(command)\n\n def get_sink_name(self, tuple):\n if tuple[0] == \"sink_name\":\n return tuple[1]\n elif tuple[0] == \"source_name\":\n return tuple[1]\n else:\n return None\n\n def load_pa_modules(self):\n self.null_sink = subprocess.check_call(\n 'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description=\"Lyrebird Output\"'.split(' ')\n )\n self.remap_sink = subprocess.check_call(\n 'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description=\"Lyrebird 
Virtual Input\"'\\\n .split(' ')\n )\n\n def get_pactl_modules(self):\n '''\n Parses `pactl info short` into tuples containing the module ID,\n the module type and the attributes of the module. It is designed\n only for named modules and as such junk data may be included in\n the returned list.\n \n Returns an array of tuples that take the form:\n (module_id (str), module_type (str), attributes (attribute tuples))\n \n The attribute tuples:\n (key (str), value (str))\n \n An example output might look like:\n [\n ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),\n ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )\n ]\n '''\n pactl_list = subprocess.run([\"pactl\", \"list\", \"short\"], capture_output=True, encoding=\"utf8\")\n lines = pactl_list.stdout\n data = []\n split_lines = lines.split(\"\\n\")\n for line in split_lines:\n info = line.split(\"\\t\")\n if len(info) <= 2:\n continue\n \n if info[2] and len(info[2]) > 0:\n key_values = list(map(lambda key_value: tuple(key_value.split(\"=\")), info[2].split(\" \")))\n data.append((info[0], info[1], key_values))\n else:\n data.append((info[0], info[1], []))\n return data\n\n def unload_pa_modules(self):\n '''\n Unloads all Lyrebird null sinks.\n '''\n modules = self.get_pactl_modules()\n lyrebird_module_ids = []\n for module in modules:\n if len(module) < 3:\n continue;\n if len(module[2]) < 1:\n continue;\n\n if module[1] == \"module-null-sink\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Output\":\n lyrebird_module_ids.append(module[0])\n elif module[1] == \"module-remap-source\":\n sink_name = self.get_sink_name(module[2][0])\n if sink_name == \"Lyrebird-Input\":\n lyrebird_module_ids.append(module[0])\n\n for id in lyrebird_module_ids:\n subprocess.run([\"pactl\", \"unload-module\", str(id)])\n", "step-ids": [ 1, 6, 8, 9, 10 ] }
[ 1, 6, 8, 9, 10 ]
''' Created on 4 Oct 2016 @author: MetalInvest ''' def isHammerHangman(high, low, open, close): body = abs(open - close) leg = min(open, close) - low return leg / body >= 2.0 and high/max(open, close) <= 1.08 def isEngulfing(df, bottom = True): open_0 = df['open'][-1] close_0 = df['close'][-1] open_1 = df['open'][-2] close_1 = df['close'][-2] body_0 = close_0 - open_0 body_1 = close_1 - open_1 if bottom: return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1) else: return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1 def isDarkCloud(): pass def isPiercing(): pass def jap_candle_reversal(df, context): # we check strong trend reversal reversal_pattern index = 0.0 # hammer & hangman if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df['close'][-1]): index += g.reversal_index if isEngulfing(df): index += g.reversal_index return index
normal
{ "blob_id": "6e739c30b3e7c15bd90b74cfd5a1d6827e863a44", "index": 4413, "step-1": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n", "step-3": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n\n\ndef isPiercing():\n pass\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n", "step-4": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\ndef isDarkCloud():\n pass\n\n\ndef isPiercing():\n pass\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n", "step-5": "'''\r\nCreated on 4 Oct 2016\r\n\r\n@author: MetalInvest\r\n'''\r\n\r\ndef isHammerHangman(high, low, open, close):\r\n body = abs(open - close)\r\n leg = min(open, close) - low\r\n return leg / body >= 2.0 and high/max(open, close) <= 1.08\r\n \r\ndef isEngulfing(df, bottom = True):\r\n open_0 = df['open'][-1]\r\n close_0 = df['close'][-1]\r\n open_1 = df['open'][-2]\r\n close_1 = df['close'][-2]\r\n body_0 = close_0 - open_0\r\n 
body_1 = close_1 - open_1\r\n if bottom: \r\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\r\n else:\r\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\r\n\r\ndef isDarkCloud():\r\n pass\r\n\r\ndef isPiercing():\r\n pass\r\n\r\ndef jap_candle_reversal(df, context):\r\n # we check strong trend reversal reversal_pattern\r\n index = 0.0\r\n # hammer & hangman\r\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df['close'][-1]):\r\n index += g.reversal_index\r\n if isEngulfing(df):\r\n index += g.reversal_index\r\n return index", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.shortcuts import render, redirect from django.utils.crypto import get_random_string def index(request): if not "word" in request.session: request.session["word"] = 'Empty' if not "count" in request.session: request.session["count"] = 0 if request.method == "GET": return render(request, "app_one/index.html") if request.method == "POST": request.session['word'] = get_random_string(length=14) request.session['count'] += 1 return redirect('/') # def generator(request): # return redirect('/') def reset(request): request.session['count'] = 0 return redirect('/')
normal
{ "blob_id": "2ec5e43860a1d248a2f5cd1abc26676342275425", "index": 8589, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n", "step-3": "<mask token>\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n", "step-4": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n", "step-5": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not \"word\" in request.session:\n request.session[\"word\"] = 'Empty'\n if not \"count\" in request.session:\n request.session[\"count\"] = 0\n if request.method == \"GET\":\n return render(request, \"app_one/index.html\")\n if request.method == \"POST\":\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n# def generator(request):\n \n# return redirect('/')\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import datetime import shutil from pathlib import Path from jinja2 import Environment, FileSystemLoader from dataclasses import dataclass PATH_TO_TEMPLATES = Path('TEMPLATES/') PATH_TO_RESOURCES = Path('RESOURCES/') PATH_TO_OUTPUT = Path('../docs/') URL_ROOT = "https://katys.cz/" link_to_homepage = "/" # TODO: always / in production html_file_suffix = ".html" @dataclass() class Page(object): title: str keywords: str description: str content_file: str url: str language: str last_mod: datetime.datetime phone: str = '+420 603 217 867' email: str = '[email protected]' def keys(self): """Get keys that allows conversion of this class to dictionary. Returns: List[str]: List of the keys to be passed to template. """ return ['title', 'keywords', 'description', 'url', 'content_file', 'language', 'phone', 'email'] def __getitem__(self, key): """Allows conversion of this class to dictionary. """ return getattr(self, key) def generate_site(self): with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han: template = Environment( loader=FileSystemLoader(PATH_TO_TEMPLATES) ).from_string(tem_han.read()) html_str = template.render( **dict(self), link_to_homepage=link_to_homepage ) return html_str @property def absolute_url(self): if self.url != 'index': return URL_ROOT + self.url + html_file_suffix return URL_ROOT @property def last_modified(self): if self.last_mod is None: return None return self.last_mod.strftime('%Y-%m-%d') unified_description = "Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další." unified_keywords = "Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec" pages = [ Page(title="Domů", keywords=unified_keywords, description=unified_description, url="index", content_file='page_home.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Reference", keywords=unified_keywords, description=unified_description, url="reference", content_file='page_reference.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), *( Page(title="Okna", keywords=unified_keywords, description=unified_description, url="okna", content_file='page_okna.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Vchodové dveře", keywords=unified_keywords, description=unified_description, url="vchodove-dvere", content_file='page_vchodove_dvere.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Interiérové dveře", keywords=unified_keywords, description=unified_description, url="interierove-dvere", content_file='page_interierove_dvere.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Zimní zahrady", keywords=unified_keywords, description=unified_description, url="zimni-zahrady", content_file='page_zimni_zahrady.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Interiéry", keywords=unified_keywords, description=unified_description, url="interiery", content_file='page_interiery.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Kuchyně", keywords=unified_keywords, description=unified_description, url="kuchyne", content_file='page_kuchyne.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Nábytek", keywords=unified_keywords, description=unified_description, url="nabytek", content_file='page_nabytek.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Stavební truhlářství", keywords=unified_keywords, 
description=unified_description, url="stavebni-truhlarstvi", content_file='page_stavebni_truhlarstvi.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Stoly a židle", keywords=unified_keywords, description=unified_description, url="stoly-a-zidle", content_file='page_stoly_a_zidle.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), ), Page(title="Zelená úsporám", keywords=unified_keywords, description=unified_description, url="zelena-usporam", content_file='page_zelena_usporam.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Fotogalerie", keywords=unified_keywords, description=unified_description, url="fotogalerie", content_file='page_fotogalerie.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Certifikáty", keywords=unified_keywords, description=unified_description, url="certifikaty", content_file='page_certifikaty.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ), Page(title="Kontakt", keywords=unified_keywords, description=unified_description, url="kontakt", content_file='page_kontakt.html', language="cs", last_mod=datetime.datetime(2020, 12, 17) ) ] # Remove all existing resources if PATH_TO_OUTPUT.exists(): shutil.rmtree(PATH_TO_OUTPUT) # Create new dir PATH_TO_OUTPUT.mkdir() for page in pages: content = page.generate_site() with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp: fp.write(content) # Copy resources shutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True) # Generate resource map: with open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han: template = Environment( loader=FileSystemLoader(PATH_TO_TEMPLATES) ).from_string(tem_han.read()) html_str = template.render( sites=pages ) with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml: f_xml.write(html_str) robots_txt_content = f"""User-agent: * Allow: / Sitemap: {URL_ROOT}sitemap.xml""" with PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h: robots_txt_h.write(robots_txt_content)
normal
{ "blob_id": "5cc18af40befab444df44bf3da1f0175e5d18983", "index": 8206, "step-1": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = '[email protected]'\n <mask token>\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n <mask token>\n <mask token>\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = '[email protected]'\n <mask token>\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = '[email protected]'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n", "step-4": "import datetime\nimport shutil\nfrom pathlib import Path\nfrom jinja2 import Environment, FileSystemLoader\nfrom dataclasses import dataclass\nPATH_TO_TEMPLATES = Path('TEMPLATES/')\nPATH_TO_RESOURCES = Path('RESOURCES/')\nPATH_TO_OUTPUT = Path('../docs/')\nURL_ROOT = 'https://katys.cz/'\nlink_to_homepage = '/'\nhtml_file_suffix = '.html'\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = '[email protected]'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the 
keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\nunified_description = (\n 'Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další.'\n )\nunified_keywords = 'Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec'\npages = [Page(title='Domů', keywords=unified_keywords, description=\n unified_description, url='index', content_file='page_home.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Reference', keywords=unified_keywords, description=unified_description,\n url='reference', content_file='page_reference.html', language='cs',\n last_mod=datetime.datetime(2020, 12, 17)), *(Page(title='Okna',\n keywords=unified_keywords, description=unified_description, url='okna',\n content_file='page_okna.html', language='cs', last_mod=datetime.\n datetime(2020, 12, 17)), Page(title='Vchodové dveře', keywords=\n unified_keywords, description=unified_description, url='vchodove-dvere',\n content_file='page_vchodove_dvere.html', language='cs', last_mod=\n datetime.datetime(2020, 12, 17)), Page(title='Interiérové dveře',\n keywords=unified_keywords, description=unified_description, url=\n 'interierove-dvere', content_file='page_interierove_dvere.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Zimní zahrady', keywords=unified_keywords, description=\n unified_description, url='zimni-zahrady', content_file=\n 'page_zimni_zahrady.html', language='cs', last_mod=datetime.datetime(\n 2020, 12, 17)), Page(title='Interiéry', keywords=unified_keywords,\n description=unified_description, url='interiery', content_file=\n 'page_interiery.html', language='cs', last_mod=datetime.datetime(2020, \n 12, 17)), Page(title='Kuchyně', keywords=unified_keywords, description=\n unified_description, url='kuchyne', content_file='page_kuchyne.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Nábytek', keywords=unified_keywords, description=unified_description,\n url='nabytek', content_file='page_nabytek.html', language='cs',\n last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Stavební truhlářství', keywords=unified_keywords, description=\n unified_description, url='stavebni-truhlarstvi', content_file=\n 'page_stavebni_truhlarstvi.html', language='cs', last_mod=datetime.\n datetime(2020, 12, 17)), Page(title='Stoly a židle', keywords=\n unified_keywords, description=unified_description, url='stoly-a-zidle',\n content_file='page_stoly_a_zidle.html', language='cs', last_mod=\n datetime.datetime(2020, 12, 17))), Page(title='Zelená úsporám',\n keywords=unified_keywords, description=unified_description, url=\n 'zelena-usporam', content_file='page_zelena_usporam.html', language=\n 'cs', 
last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Fotogalerie', keywords=unified_keywords, description=\n unified_description, url='fotogalerie', content_file=\n 'page_fotogalerie.html', language='cs', last_mod=datetime.datetime(2020,\n 12, 17)), Page(title='Certifikáty', keywords=unified_keywords,\n description=unified_description, url='certifikaty', content_file=\n 'page_certifikaty.html', language='cs', last_mod=datetime.datetime(2020,\n 12, 17)), Page(title='Kontakt', keywords=unified_keywords, description=\n unified_description, url='kontakt', content_file='page_kontakt.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17))]\nif PATH_TO_OUTPUT.exists():\n shutil.rmtree(PATH_TO_OUTPUT)\nPATH_TO_OUTPUT.mkdir()\nfor page in pages:\n content = page.generate_site()\n with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:\n fp.write(content)\nshutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)\nwith open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(sites=pages)\n with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:\n f_xml.write(html_str)\nrobots_txt_content = f\"\"\"User-agent: *\nAllow: /\nSitemap: {URL_ROOT}sitemap.xml\"\"\"\nwith PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:\n robots_txt_h.write(robots_txt_content)\n", "step-5": "import datetime\nimport shutil\nfrom pathlib import Path\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom dataclasses import dataclass\n\nPATH_TO_TEMPLATES = Path('TEMPLATES/')\nPATH_TO_RESOURCES = Path('RESOURCES/')\nPATH_TO_OUTPUT = Path('../docs/')\nURL_ROOT = \"https://katys.cz/\"\n\nlink_to_homepage = \"/\" # TODO: always / in production\nhtml_file_suffix = \".html\"\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = '[email protected]'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(\n loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(\n **dict(self),\n link_to_homepage=link_to_homepage\n )\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\nunified_description = \"Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další.\"\nunified_keywords = \"Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec\"\n\npages = [\n Page(title=\"Domů\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"index\",\n content_file='page_home.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Reference\",\n 
keywords=unified_keywords,\n description=unified_description,\n url=\"reference\",\n content_file='page_reference.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n *(\n Page(title=\"Okna\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"okna\",\n content_file='page_okna.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Vchodové dveře\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"vchodove-dvere\",\n content_file='page_vchodove_dvere.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Interiérové dveře\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"interierove-dvere\",\n content_file='page_interierove_dvere.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Zimní zahrady\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"zimni-zahrady\",\n content_file='page_zimni_zahrady.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Interiéry\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"interiery\",\n content_file='page_interiery.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Kuchyně\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"kuchyne\",\n content_file='page_kuchyne.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Nábytek\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"nabytek\",\n content_file='page_nabytek.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Stavební truhlářství\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"stavebni-truhlarstvi\",\n content_file='page_stavebni_truhlarstvi.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Stoly a židle\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"stoly-a-zidle\",\n content_file='page_stoly_a_zidle.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n ),\n Page(title=\"Zelená úsporám\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"zelena-usporam\",\n content_file='page_zelena_usporam.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Fotogalerie\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"fotogalerie\",\n content_file='page_fotogalerie.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Certifikáty\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"certifikaty\",\n content_file='page_certifikaty.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Kontakt\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"kontakt\",\n content_file='page_kontakt.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n )\n]\n\n# Remove all existing resources\nif PATH_TO_OUTPUT.exists():\n shutil.rmtree(PATH_TO_OUTPUT)\n\n# Create new dir\nPATH_TO_OUTPUT.mkdir()\n\nfor page in pages:\n content = page.generate_site()\n with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:\n fp.write(content)\n\n# Copy resources\nshutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)\n\n# Generate resource map:\nwith 
open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:\n template = Environment(\n loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(\n sites=pages\n )\n with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:\n f_xml.write(html_str)\n\nrobots_txt_content = f\"\"\"User-agent: *\nAllow: /\nSitemap: {URL_ROOT}sitemap.xml\"\"\"\nwith PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:\n robots_txt_h.write(robots_txt_content)\n", "step-ids": [ 3, 5, 6, 9, 10 ] }
[ 3, 5, 6, 9, 10 ]
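The Page dataclass in the record above defines keys() and __getitem__ so that dict(self) and **dict(self) work when the Jinja template is rendered. A minimal standalone sketch of that mapping-protocol trick (the Point class is illustrative only, not code from the project):

from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

    def keys(self):
        # names handed to dict() / ** unpacking
        return ['x', 'y']

    def __getitem__(self, key):
        return getattr(self, key)

p = Point(1, 2)
print(dict(p))                # {'x': 1, 'y': 2}
print("{x},{y}".format(**p))  # '1,2' -- the same unpacking used by render(**dict(self)) above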
import requests import os from dotenv import load_dotenv from datetime import datetime load_dotenv(".env") # loads the environment file USERNAME = os.getenv("USER") TOKEN = os.getenv("TOKEN") pixela_endpoint = "https://pixe.la/v1/users" # MAKING AN ACCOUNT user_params = { "token": TOKEN, "username": USERNAME, "agreeTermsOfService": "yes", "notMinor": "yes", } # response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json # print(response.text) # gives the response as a piece of text # CREATING A GRAPH graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs" # endpoint for the graph creation graph_config = { "id": "graph1", "name": "Reading Graph", "unit": "hours", "type": "int", "color": "shibafu" } headers = { "X-USER-TOKEN": TOKEN } # response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were use to create graph # print(response.text) # POST A PIXEL post_pixel_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1" # today = datetime(year=2020, month=12, day=25) custom date today = datetime.now() formatted_date = today.strftime("%Y%m%d") pixel_config = { "date": today.strftime("%Y%m%d"), "quantity": input("How many hours did you spend reading today? "), } response = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel print(response.text) # UPDATING A PIXEL update_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}" updated_pixel = { "quantity": "3" } # response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel) # print(response.text) # DELETING A PIXEL # delete_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}" # response = requests.delete(url=delete_endpoint,headers=headers)
normal
{ "blob_id": "ba34dfcad0cb9bac9c462bdf60e55dee6ba9d58d", "index": 9255, "step-1": "<mask token>\n", "step-2": "<mask token>\nload_dotenv('.env')\n<mask token>\nprint(response.text)\n<mask token>\n", "step-3": "<mask token>\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? ')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n", "step-4": "import requests\nimport os\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? 
')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n", "step-5": "import requests\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom datetime import datetime\r\n\r\nload_dotenv(\".env\") # loads the environment file\r\n\r\n\r\nUSERNAME = os.getenv(\"USER\")\r\nTOKEN = os.getenv(\"TOKEN\")\r\npixela_endpoint = \"https://pixe.la/v1/users\"\r\n\r\n\r\n\r\n# MAKING AN ACCOUNT\r\nuser_params = {\r\n \"token\": TOKEN,\r\n \"username\": USERNAME,\r\n \"agreeTermsOfService\": \"yes\",\r\n \"notMinor\": \"yes\",\r\n\r\n}\r\n\r\n# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json\r\n# print(response.text) # gives the response as a piece of text\r\n\r\n\r\n# CREATING A GRAPH\r\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\" # endpoint for the graph creation\r\n\r\ngraph_config = {\r\n \"id\": \"graph1\",\r\n \"name\": \"Reading Graph\",\r\n \"unit\": \"hours\",\r\n \"type\": \"int\",\r\n \"color\": \"shibafu\"\r\n\r\n}\r\n\r\nheaders = {\r\n \"X-USER-TOKEN\": TOKEN\r\n}\r\n\r\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were use to create graph\r\n# print(response.text)\r\n\r\n\r\n# POST A PIXEL\r\npost_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1\"\r\n\r\n\r\n# today = datetime(year=2020, month=12, day=25) custom date\r\ntoday = datetime.now()\r\nformatted_date = today.strftime(\"%Y%m%d\")\r\npixel_config = {\r\n \"date\": today.strftime(\"%Y%m%d\"),\r\n \"quantity\": input(\"How many hours did you spend reading today? \"),\r\n\r\n}\r\n\r\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel\r\nprint(response.text)\r\n\r\n\r\n# UPDATING A PIXEL\r\n\r\nupdate_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\nupdated_pixel = {\r\n \"quantity\": \"3\"\r\n}\r\n\r\n# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)\r\n# print(response.text)\r\n\r\n\r\n# DELETING A PIXEL\r\n\r\n# delete_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\n# response = requests.delete(url=delete_endpoint,headers=headers)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
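A quick check of the date string the Pixela snippet above sends; the format comes from its strftime call, and the example date mirrors the commented-out custom date in that code:

from datetime import datetime
print(datetime(2020, 12, 25).strftime("%Y%m%d"))  # '20201225'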
''' Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n). For example, S = "ADOBECODEBANC" T = "ABC" Minimum window is "BANC". Note: If there is no such window in S that covers all characters in T, return the empty string "". If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S. ''' from collections import defaultdict class Solution: """ @param: source : A string @param: target: A string @return: A string denote the minimum window, return "" if there is no such a string """ def minWindow(self, source, target): # create a hashmap/dictionary for target, {key: value = char: count} s_char_count = defaultdict(int) t_char_count = defaultdict(int) for char in target: t_char_count[char] += 1 j = 0 min_substr = '' min_length = float('inf') for i in range(len(source)): while j < len(source) and not self.is_contain(s_char_count, t_char_count): s_char_count[source[j]] += 1 j += 1 if self.is_contain(s_char_count, t_char_count): if min_length > j - i: min_length = j - i min_substr = source[i:j] s_char_count[source[i]] -= 1 return min_substr def is_contain(self, s_char_count, t_char_count): for char in t_char_count: if char not in s_char_count or s_char_count[char] < t_char_count[char]: return False return True
normal
{ "blob_id": "665a868ee71f247a621d82108e545257296e0427", "index": 7048, "step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def minWindow(self, source, target):\n s_char_count = defaultdict(int)\n t_char_count = defaultdict(int)\n for char in target:\n t_char_count[char] += 1\n j = 0\n min_substr = ''\n min_length = float('inf')\n for i in range(len(source)):\n while j < len(source) and not self.is_contain(s_char_count,\n t_char_count):\n s_char_count[source[j]] += 1\n j += 1\n if self.is_contain(s_char_count, t_char_count):\n if min_length > j - i:\n min_length = j - i\n min_substr = source[i:j]\n s_char_count[source[i]] -= 1\n return min_substr\n\n def is_contain(self, s_char_count, t_char_count):\n for char in t_char_count:\n if char not in s_char_count or s_char_count[char] < t_char_count[\n char]:\n return False\n return True\n", "step-3": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param: source : A string\n @param: target: A string\n @return: A string denote the minimum window, return \"\" if there is no such a string\n \"\"\"\n\n def minWindow(self, source, target):\n s_char_count = defaultdict(int)\n t_char_count = defaultdict(int)\n for char in target:\n t_char_count[char] += 1\n j = 0\n min_substr = ''\n min_length = float('inf')\n for i in range(len(source)):\n while j < len(source) and not self.is_contain(s_char_count,\n t_char_count):\n s_char_count[source[j]] += 1\n j += 1\n if self.is_contain(s_char_count, t_char_count):\n if min_length > j - i:\n min_length = j - i\n min_substr = source[i:j]\n s_char_count[source[i]] -= 1\n return min_substr\n\n def is_contain(self, s_char_count, t_char_count):\n for char in t_char_count:\n if char not in s_char_count or s_char_count[char] < t_char_count[\n char]:\n return False\n return True\n", "step-4": "<mask token>\nfrom collections import defaultdict\n\n\nclass Solution:\n \"\"\"\n @param: source : A string\n @param: target: A string\n @return: A string denote the minimum window, return \"\" if there is no such a string\n \"\"\"\n\n def minWindow(self, source, target):\n s_char_count = defaultdict(int)\n t_char_count = defaultdict(int)\n for char in target:\n t_char_count[char] += 1\n j = 0\n min_substr = ''\n min_length = float('inf')\n for i in range(len(source)):\n while j < len(source) and not self.is_contain(s_char_count,\n t_char_count):\n s_char_count[source[j]] += 1\n j += 1\n if self.is_contain(s_char_count, t_char_count):\n if min_length > j - i:\n min_length = j - i\n min_substr = source[i:j]\n s_char_count[source[i]] -= 1\n return min_substr\n\n def is_contain(self, s_char_count, t_char_count):\n for char in t_char_count:\n if char not in s_char_count or s_char_count[char] < t_char_count[\n char]:\n return False\n return True\n", "step-5": "'''\nGiven a string S and a string T,\nfind the minimum window in S which will contain all the characters in T in complexity O(n).\n\nFor example,\nS = \"ADOBECODEBANC\"\nT = \"ABC\"\nMinimum window is \"BANC\".\n\nNote:\nIf there is no such window in S that covers all characters in T, return the empty string \"\".\n\nIf there are multiple such windows,\nyou are guaranteed that there will always be only one unique minimum window in S.\n'''\nfrom collections import defaultdict\nclass Solution:\n \"\"\"\n @param: source : A string\n @param: target: A string\n @return: A string denote the minimum window, return \"\" if there is no such a string\n \"\"\"\n def minWindow(self, 
source, target):\n # create a hashmap/dictionary for target, {key: value = char: count}\n s_char_count = defaultdict(int)\n t_char_count = defaultdict(int)\n\n for char in target:\n t_char_count[char] += 1\n\n j = 0\n min_substr = ''\n min_length = float('inf')\n\n for i in range(len(source)):\n while j < len(source) and not self.is_contain(s_char_count, t_char_count):\n s_char_count[source[j]] += 1\n j += 1\n\n if self.is_contain(s_char_count, t_char_count):\n if min_length > j - i:\n min_length = j - i\n min_substr = source[i:j]\n s_char_count[source[i]] -= 1\n\n return min_substr\n\n def is_contain(self, s_char_count, t_char_count):\n for char in t_char_count:\n if char not in s_char_count or s_char_count[char] < t_char_count[char]:\n return False\n return True\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
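A short usage check of the Solution class above (assumed to be in scope), mirroring the example given in its docstring:

print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # "BANC", the minimum window covering A, B and C
print(Solution().minWindow("A", "AA"))               # ""  -- no window of source covers target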
import math def vol_shell(r1, r2): a=abs((4/3)*math.pi*((r1**3)-(r2**3))) return round(a,3) print(vol_shell(3,3))
normal
{ "blob_id": "cd234911c1f990b8029dfa792d132847bf39a6aa", "index": 445, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n", "step-4": "import math\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n", "step-5": "\nimport math\ndef vol_shell(r1, r2):\n a=abs((4/3)*math.pi*((r1**3)-(r2**3)))\n return round(a,3)\nprint(vol_shell(3,3))\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
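Two worked values for vol_shell above (assumed to be in scope); the body computes 4/3 * pi * |r1**3 - r2**3| rounded to three decimals:

print(vol_shell(3, 3))  # 0.0    -- equal radii, the shell has no volume
print(vol_shell(3, 2))  # 79.587 -- 4/3 * pi * (27 - 8)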
# This handle the url for routing from django.urls import path from . import views # Defines views to pass dynamic data to listings page urlpatterns = [ path('', views.index, name='listings'), path('<int:listing_id>', views.listing, name='listing'), path('search', views.search, name='search') ]
normal
{ "blob_id": "be894830bb0dde6bacaea6be823391e0445603c3", "index": 1192, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n", "step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n", "step-4": "# This handle the url for routing\n\nfrom django.urls import path\nfrom . import views\n\n# Defines views to pass dynamic data to listings page\nurlpatterns = [\n path('', views.index, name='listings'),\n path('<int:listing_id>', views.listing, name='listing'),\n path('search', views.search, name='search')\n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Generated by Django 2.2 on 2021-01-31 14:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0004_product_pr_number'), ] operations = [ migrations.RemoveField( model_name='payment', name='PA_id', ), migrations.AddField( model_name='payment', name='buyer', field=models.CharField(default=0, max_length=32), preserve_default=False, ), migrations.AlterField( model_name='payment', name='PA_type', field=models.CharField(default='credit', max_length=32), ), ]
normal
{ "blob_id": "388772386f25d6c2f9cc8778b7ce1b2ad0920851", "index": 6986, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n", "step-5": "# Generated by Django 2.2 on 2021-01-31 14:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0004_product_pr_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='payment',\n name='PA_id',\n ),\n migrations.AddField(\n model_name='payment',\n name='buyer',\n field=models.CharField(default=0, max_length=32),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='payment',\n name='PA_type',\n field=models.CharField(default='credit', max_length=32),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
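Read together, the operations above remove PA_id and leave the payment model with at least the following two fields; this is an inference from the migration for orientation, not code taken from the app:

from django.db import models

class Payment(models.Model):
    PA_type = models.CharField(default='credit', max_length=32)
    buyer = models.CharField(max_length=32)  # backfilled with a one-off default of 0 during the migration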
import requests
import json

l = list()

with open ( "token.txt", "r") as f:

    token = f.read()

    # create the request header containing our token
    headers = {"X-Xapp-Token" : token}

    with open('dataset_24476_4.txt', 'r') as id:

        for line in id:
            address = "https://api.artsy.net/api/artists/" + line.strip()
            # send the request with the header
            r = requests.get(address, headers=headers)

            # parse the server response
            j = json.loads(r.text)

            l.append((j['sortable_name'], j['birthday']))

#l.append((('Warhol Bandy', '1928')))
#l.append((('Warhol Aandy', '1928')))


l = sorted(l, key=lambda tup: (tup[1], tup[0]))
for i in l:
    print(i[0])

# year = '0000'
# new_l = []
#
# k = []
#
# for i in l:
#     if i[1] != year:
#         k = []
#         k.append(i[0])
#         year = i[1]
#     else:
#         k.append(i[0])
#     k.sort()
#     print(next(name for name in k))
normal
{ "blob_id": "e1ecc08f66e094841647f72b78bcd29ed8d32668", "index": 5976, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\n<mask token>\nfor i in l:\n print(i[0])\n", "step-3": "<mask token>\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n", "step-4": "import requests\nimport json\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n", "step-5": "import requests\nimport json\n\nl = list()\n\nwith open ( \"token.txt\", \"r\") as f:\n\n token = f.read()\n\n # создаем заголовок, содержащий наш токен\n headers = {\"X-Xapp-Token\" : token}\n\n with open('dataset_24476_4.txt', 'r') as id:\n\n for line in id:\n address = \"https://api.artsy.net/api/artists/\" + line.strip()\n # инициируем запрос с заголовком\n r = requests.get(address, headers=headers)\n\n # разбираем ответ сервера\n j = json.loads(r.text)\n\n l.append((j['sortable_name'], j['birthday']))\n\n#l.append((('Warhol Bandy', '1928')))\n#l.append((('Warhol Aandy', '1928')))\n\n\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n\n# year = '0000'\n# new_l = []\n#\n# k = []\n#\n# for i in l:\n# if i[1] != year:\n# k = []\n# k.append(i[0])\n# year = i[1]\n# else:\n# k.append(i[0])\n# k.sort()\n# print(next(name for name in k))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
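The sort key in the Artsy snippet above orders by birthday first and name second; the commented-out 'Warhol Aandy'/'Warhol Bandy' lines suggest that tie-break was the case being checked. A standalone illustration (the third entry is made up for the example):

artists = [('Warhol Bandy', '1928'), ('Warhol Aandy', '1928'), ('Arbus Diane', '1923')]
print(sorted(artists, key=lambda tup: (tup[1], tup[0])))
# [('Arbus Diane', '1923'), ('Warhol Aandy', '1928'), ('Warhol Bandy', '1928')]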
#! /usr/bin/env python import sys import socket def handle_connection(sock): do_close = False while 1: try: data = sock.recv(4096) if not data: # closed! stop monitoring this socket. do_close = True break print 'data:', (data,) sock.sendall(data) if '.\r\n' in data: sock.close() do_close = True # stop monitoring this socket. break except socket.error: print 'no data waiting...' break return do_close if __name__ == '__main__': interface, port = sys.argv[1:3] port = int(port) print 'binding', interface, port sock = socket.socket() sock.bind( (interface, port) ) sock.listen(5) sock.setblocking(0) connections = [] while 1: # loop, doing two things: # first, get a new connection # second, process (receive/send) data for each existing connection # first, do we have a new connection waiting? try: print 'testing for new connection' (client_sock, client_address) = sock.accept() # if this succeeds, we got a new connection... no new connection # raises a 'socket.error' print 'got connection', client_address client_sock.setblocking(0) connections.append((client_sock, client_address)) except socket.error: # no new connection! do nothing. pass # now, process data for existing connections. open_connections = [] for (client_sock, client_address) in connections: print 'processing data for', client_address do_close = handle_connection(client_sock) if not do_close: open_connections.append((client_sock, client_address)) connections = open_connections
normal
{ "blob_id": "fde4c10e2ed0ed38d683a220e2985c3f3f336601", "index": 7258, "step-1": "#! /usr/bin/env python\nimport sys\nimport socket\n\ndef handle_connection(sock):\n do_close = False\n \n while 1:\n try:\n data = sock.recv(4096)\n if not data: # closed! stop monitoring this socket.\n do_close = True\n break\n\n print 'data:', (data,)\n\n sock.sendall(data)\n\n if '.\\r\\n' in data:\n sock.close()\n do_close = True # stop monitoring this socket.\n break\n except socket.error:\n print 'no data waiting...'\n break\n\n return do_close\n\nif __name__ == '__main__':\n interface, port = sys.argv[1:3]\n port = int(port)\n\n print 'binding', interface, port\n sock = socket.socket()\n sock.bind( (interface, port) )\n sock.listen(5)\n\n sock.setblocking(0)\n\n connections = []\n while 1:\n\n # loop, doing two things:\n # first, get a new connection\n # second, process (receive/send) data for each existing connection\n\n # first, do we have a new connection waiting?\n try:\n print 'testing for new connection'\n (client_sock, client_address) = sock.accept()\n\n # if this succeeds, we got a new connection... no new connection\n # raises a 'socket.error'\n print 'got connection', client_address\n client_sock.setblocking(0)\n connections.append((client_sock, client_address))\n except socket.error: # no new connection! do nothing.\n pass\n\n # now, process data for existing connections.\n open_connections = []\n for (client_sock, client_address) in connections:\n print 'processing data for', client_address\n do_close = handle_connection(client_sock)\n\n if not do_close:\n open_connections.append((client_sock, client_address))\n\n connections = open_connections\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
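For context, a hypothetical client for the polling echo server above, written for Python 3 even though the server is Python 2; the host and port are assumptions (they come from the server's command-line arguments), and the server closes the connection once ".\r\n" appears in the stream:

import socket

# assumes the server was started as e.g. `python server.py localhost 8000`
client = socket.create_connection(("localhost", 8000))
client.sendall(b"hello\r\n")
print(client.recv(4096))   # the same bytes echoed back
client.sendall(b".\r\n")   # tells the server to finish and close this connection
client.close()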
from data_structures.datacenter import Datacenter

import json
import time
import urllib.request


URL = "http://www.mocky.io/v2/5e539b332e00007c002dacbe"


def get_data(url, max_retries=5, delay_between_retries=1):
    """
    Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe
    and return it as a JSON object.

    Args:
        url (str): The url to be fetched.
        max_retries (int): Number of retries.
        delay_between_retries (int): Delay between retries in seconds.
    Returns:
        data (dict)
    """
    for _ in range(max_retries):
        try:
            response = urllib.request.urlopen(url)
            return json.loads(response.read())
        except Exception:
            # back off before the next attempt
            time.sleep(delay_between_retries)
    return None


def main():
    """
    Main entry to our program.
    """

    data = get_data(URL)

    if not data:
        raise ValueError('No data to process')

    datacenters = [
        Datacenter(key, value)
        for key, value in data.items()
    ]

    pass  # the rest of your logic here


if __name__ == '__main__':
    main()
normal
{ "blob_id": "e56a7912b9940b1cab6c19d0047f1f60f0083f66", "index": 4911, "step-1": "from data_structures.datacenter import Datacenter, urllib, json,\n\n\nURL = \"http://www.mocky.io/v2/5e539b332e00007c002dacbe\"\n\n\ndef get_data(url, max_retries=5, delay_between_retries=1):\n \"\"\"\n Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe\n and return it as a JSON object.\n​\n Args:\n url (str): The url to be fetched.\n max_retries (int): Number of retries.\n delay_between_retries (int): Delay between retries in seconds.\n Returns:\n data (dict)\n \"\"\"\n pass # the rest of your logic here\n for i in max_retries:\n while True:\n try\n time.sleep(delay_between_tries)\n response = urllib.request.urlopen(url)\n data = json.loads(response.read())\n print (data)\n break\n except Exception:\n continue\n \n \n \n\n\n\n\n\n\ndef main():\n \"\"\"\n Main entry to our program.\n \"\"\"\n\n data = get_data(URL)\n\n if not data:\n raise ValueError('No data to process')\n\n datacenters = [\n Datacenter(key, value)\n for key, value in data.items()\n ]\n\n pass # the rest of your logic here\n\n\nif __name__ == '__main__':\n main()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/g/kreshuk/lukoianov/miniconda3/envs/inferno/bin/python3 # BASIC IMPORTS import argparse import os import subprocess import sys import numpy as np # INTERNAL IMPORTS from src.datasets import CentriollesDatasetOn, CentriollesDatasetBags, GENdataset from src.utils import get_basic_transforms, log_info, get_resps_transforms import src.implemented_models as impl_models # INFERNO IMPORTS import torch from inferno.trainers.basic import Trainer from torch.utils.data import DataLoader from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger from inferno.trainers.callbacks.scheduling import AutoLR if __name__ == "__main__": parser = argparse.ArgumentParser(description='Run learning of simple CNN implementation') parser.add_argument('--model_name', type=str, default='', help='Name of the model from models dir') parser.add_argument('--test', action='store_true', help='Test this model on simpler dataset') parser.add_argument('--features', action='store_true', help='Representation of repsponces') parser.add_argument('--mil', action='store_true', help='Continue learning on the bag lavel') parser.add_argument('--id', type=str, default='default', help='Unique net id to save') parser.add_argument('--img_size', type=int, default=60, help='Size of input images') args = parser.parse_args() log_info('Params: ' + str(args)) if args.mil: train_tr, test_tr = get_resps_transforms(features=args.features) if args.test: train_ds = GENdataset(transform=train_tr, bags=False, crop=True) test_ds = GENdataset(train=False, transform=test_tr, bags=False, crop=True) log_info('Artificial MIL data is used') else: train_ds = CentriollesDatasetBags(transform=train_tr, inp_size=512, bags=False, crop=True) test_ds = CentriollesDatasetBags(train=False, transform=test_tr, inp_size=512, bags=False, crop=True) log_info('MIL dataset is used') else: train_tr, test_tr = get_basic_transforms() if args.test: train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir='dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args.img_size) test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir='dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args.img_size, train=False) log_info('Test bags dataset is used') else: train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir='dataset/artificial/train_pos/', neg_dir='dataset/artificial/train_neg/', inp_size=args.img_size, all_data=True) test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir='dataset/artificial/test_pos/', neg_dir='dataset/artificial/test_neg/', inp_size=args.img_size, all_data=True) log_info('ILC dataset is used') train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0) test_dl = DataLoader(test_ds, batch_size=4, shuffle=True, num_workers=0) log_info('Datasets are initialized!') # DIRS AND MODEL exec("model = impl_models.%s" % (args.model_name)) model_dir = os.path.join('models', args.model_name) curent_model_dir = os.path.join(model_dir, args.id) log_info('Model will be saved to %s' % (curent_model_dir)) log_info(' + Number of params: {}'.format(sum([p.data.nelement() for p in model.parameters()]))) weight_dir = os.path.join(curent_model_dir, 'weights') log_info('Weights will be saved to %s' % (weight_dir)) if not os.path.exists(weight_dir): os.mkdir(weight_dir) logs_dir = os.path.join(curent_model_dir, 'logs') if not os.path.exists(logs_dir): os.mkdir(logs_dir) log_info('Logs will be saved to %s' % (logs_dir)) # Build trainer logger = TensorboardLogger(log_scalars_every=(1, 'iteration'), log_images_every=(np.inf, 'epochs')) def 
log_histogram(self, tag, values, bins=1000): pass logger.log_histogram = log_histogram trainer = Trainer(model)\ .build_criterion('CrossEntropyLoss') \ .build_metric('CategoricalError') \ .build_optimizer('Adam') \ .validate_every((2, 'epochs')) \ .save_every((5, 'epochs')) \ .save_to_directory(weight_dir) \ .set_max_num_epochs(10000) \ .build_logger(logger, log_directory=logs_dir) \ .register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=0.9, monitor_while='validating', consider_improvement_with_respect_to='best')) # Bind loaders trainer \ .bind_loader('train', train_dl) \ .bind_loader('validate', test_dl) if torch.cuda.is_available(): trainer.cuda() trainer.fit()
normal
{ "blob_id": "604c94e50b1fb9b5e451c4432113498410a4ac1f", "index": 5262, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Run learning of simple CNN implementation')\n parser.add_argument('--model_name', type=str, default='', help=\n 'Name of the model from models dir')\n parser.add_argument('--test', action='store_true', help=\n 'Test this model on simpler dataset')\n parser.add_argument('--features', action='store_true', help=\n 'Representation of repsponces')\n parser.add_argument('--mil', action='store_true', help=\n 'Continue learning on the bag lavel')\n parser.add_argument('--id', type=str, default='default', help=\n 'Unique net id to save')\n parser.add_argument('--img_size', type=int, default=60, help=\n 'Size of input images')\n args = parser.parse_args()\n log_info('Params: ' + str(args))\n if args.mil:\n train_tr, test_tr = get_resps_transforms(features=args.features)\n if args.test:\n train_ds = GENdataset(transform=train_tr, bags=False, crop=True)\n test_ds = GENdataset(train=False, transform=test_tr, bags=False,\n crop=True)\n log_info('Artificial MIL data is used')\n else:\n train_ds = CentriollesDatasetBags(transform=train_tr, inp_size=\n 512, bags=False, crop=True)\n test_ds = CentriollesDatasetBags(train=False, transform=test_tr,\n inp_size=512, bags=False, crop=True)\n log_info('MIL dataset is used')\n else:\n train_tr, test_tr = get_basic_transforms()\n if args.test:\n train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir=\n 'dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args\n .img_size)\n test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir=\n 'dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args\n .img_size, train=False)\n log_info('Test bags dataset is used')\n else:\n train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir=\n 'dataset/artificial/train_pos/', neg_dir=\n 'dataset/artificial/train_neg/', inp_size=args.img_size,\n all_data=True)\n test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir=\n 'dataset/artificial/test_pos/', neg_dir=\n 'dataset/artificial/test_neg/', inp_size=args.img_size,\n all_data=True)\n log_info('ILC dataset is used')\n train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0)\n test_dl = DataLoader(test_ds, batch_size=4, shuffle=True, num_workers=0)\n log_info('Datasets are initialized!')\n exec('model = impl_models.%s' % args.model_name)\n model_dir = os.path.join('models', args.model_name)\n curent_model_dir = os.path.join(model_dir, args.id)\n log_info('Model will be saved to %s' % curent_model_dir)\n log_info(' + Number of params: {}'.format(sum([p.data.nelement() for p in\n model.parameters()])))\n weight_dir = os.path.join(curent_model_dir, 'weights')\n log_info('Weights will be saved to %s' % weight_dir)\n if not os.path.exists(weight_dir):\n os.mkdir(weight_dir)\n logs_dir = os.path.join(curent_model_dir, 'logs')\n if not os.path.exists(logs_dir):\n os.mkdir(logs_dir)\n log_info('Logs will be saved to %s' % logs_dir)\n logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),\n log_images_every=(np.inf, 'epochs'))\n\n def log_histogram(self, tag, values, bins=1000):\n pass\n logger.log_histogram = log_histogram\n trainer = Trainer(model).build_criterion('CrossEntropyLoss').build_metric(\n 'CategoricalError').build_optimizer('Adam').validate_every((2,\n 'epochs')).save_every((5, 'epochs')).save_to_directory(weight_dir\n ).set_max_num_epochs(10000).build_logger(logger, log_directory=logs_dir\n 
).register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=\n 0.9, monitor_while='validating',\n consider_improvement_with_respect_to='best'))\n trainer.bind_loader('train', train_dl).bind_loader('validate', test_dl)\n if torch.cuda.is_available():\n trainer.cuda()\n trainer.fit()\n", "step-3": "import argparse\nimport os\nimport subprocess\nimport sys\nimport numpy as np\nfrom src.datasets import CentriollesDatasetOn, CentriollesDatasetBags, GENdataset\nfrom src.utils import get_basic_transforms, log_info, get_resps_transforms\nimport src.implemented_models as impl_models\nimport torch\nfrom inferno.trainers.basic import Trainer\nfrom torch.utils.data import DataLoader\nfrom inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger\nfrom inferno.trainers.callbacks.scheduling import AutoLR\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Run learning of simple CNN implementation')\n parser.add_argument('--model_name', type=str, default='', help=\n 'Name of the model from models dir')\n parser.add_argument('--test', action='store_true', help=\n 'Test this model on simpler dataset')\n parser.add_argument('--features', action='store_true', help=\n 'Representation of repsponces')\n parser.add_argument('--mil', action='store_true', help=\n 'Continue learning on the bag lavel')\n parser.add_argument('--id', type=str, default='default', help=\n 'Unique net id to save')\n parser.add_argument('--img_size', type=int, default=60, help=\n 'Size of input images')\n args = parser.parse_args()\n log_info('Params: ' + str(args))\n if args.mil:\n train_tr, test_tr = get_resps_transforms(features=args.features)\n if args.test:\n train_ds = GENdataset(transform=train_tr, bags=False, crop=True)\n test_ds = GENdataset(train=False, transform=test_tr, bags=False,\n crop=True)\n log_info('Artificial MIL data is used')\n else:\n train_ds = CentriollesDatasetBags(transform=train_tr, inp_size=\n 512, bags=False, crop=True)\n test_ds = CentriollesDatasetBags(train=False, transform=test_tr,\n inp_size=512, bags=False, crop=True)\n log_info('MIL dataset is used')\n else:\n train_tr, test_tr = get_basic_transforms()\n if args.test:\n train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir=\n 'dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args\n .img_size)\n test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir=\n 'dataset/mnist/1', neg_dir='dataset/mnist/0', inp_size=args\n .img_size, train=False)\n log_info('Test bags dataset is used')\n else:\n train_ds = CentriollesDatasetOn(transform=train_tr, pos_dir=\n 'dataset/artificial/train_pos/', neg_dir=\n 'dataset/artificial/train_neg/', inp_size=args.img_size,\n all_data=True)\n test_ds = CentriollesDatasetOn(transform=test_tr, pos_dir=\n 'dataset/artificial/test_pos/', neg_dir=\n 'dataset/artificial/test_neg/', inp_size=args.img_size,\n all_data=True)\n log_info('ILC dataset is used')\n train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0)\n test_dl = DataLoader(test_ds, batch_size=4, shuffle=True, num_workers=0)\n log_info('Datasets are initialized!')\n exec('model = impl_models.%s' % args.model_name)\n model_dir = os.path.join('models', args.model_name)\n curent_model_dir = os.path.join(model_dir, args.id)\n log_info('Model will be saved to %s' % curent_model_dir)\n log_info(' + Number of params: {}'.format(sum([p.data.nelement() for p in\n model.parameters()])))\n weight_dir = os.path.join(curent_model_dir, 'weights')\n log_info('Weights will be saved to %s' % weight_dir)\n if not 
os.path.exists(weight_dir):\n os.mkdir(weight_dir)\n logs_dir = os.path.join(curent_model_dir, 'logs')\n if not os.path.exists(logs_dir):\n os.mkdir(logs_dir)\n log_info('Logs will be saved to %s' % logs_dir)\n logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),\n log_images_every=(np.inf, 'epochs'))\n\n def log_histogram(self, tag, values, bins=1000):\n pass\n logger.log_histogram = log_histogram\n trainer = Trainer(model).build_criterion('CrossEntropyLoss').build_metric(\n 'CategoricalError').build_optimizer('Adam').validate_every((2,\n 'epochs')).save_every((5, 'epochs')).save_to_directory(weight_dir\n ).set_max_num_epochs(10000).build_logger(logger, log_directory=logs_dir\n ).register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=\n 0.9, monitor_while='validating',\n consider_improvement_with_respect_to='best'))\n trainer.bind_loader('train', train_dl).bind_loader('validate', test_dl)\n if torch.cuda.is_available():\n trainer.cuda()\n trainer.fit()\n", "step-4": "#!/g/kreshuk/lukoianov/miniconda3/envs/inferno/bin/python3\n\n# BASIC IMPORTS\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport numpy as np\n\n# INTERNAL IMPORTS\nfrom src.datasets import CentriollesDatasetOn, CentriollesDatasetBags, GENdataset\nfrom src.utils import get_basic_transforms, log_info, get_resps_transforms\nimport src.implemented_models as impl_models\n\n# INFERNO IMPORTS\nimport torch\nfrom inferno.trainers.basic import Trainer\nfrom torch.utils.data import DataLoader\nfrom inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger\nfrom inferno.trainers.callbacks.scheduling import AutoLR\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run learning of simple CNN implementation')\n\n parser.add_argument('--model_name', type=str, default='', help='Name of the model from models dir')\n parser.add_argument('--test', action='store_true', help='Test this model on simpler dataset')\n parser.add_argument('--features', action='store_true', help='Representation of repsponces')\n parser.add_argument('--mil', action='store_true', help='Continue learning on the bag lavel')\n parser.add_argument('--id', type=str, default='default', help='Unique net id to save')\n parser.add_argument('--img_size', type=int, default=60, help='Size of input images')\n\n args = parser.parse_args()\n log_info('Params: ' + str(args))\n\n if args.mil:\n train_tr, test_tr = get_resps_transforms(features=args.features)\n if args.test:\n train_ds = GENdataset(transform=train_tr, bags=False, crop=True)\n test_ds = GENdataset(train=False, transform=test_tr, bags=False, crop=True)\n log_info('Artificial MIL data is used')\n else:\n train_ds = CentriollesDatasetBags(transform=train_tr,\n inp_size=512, bags=False, crop=True)\n test_ds = CentriollesDatasetBags(train=False, transform=test_tr,\n inp_size=512, bags=False, crop=True)\n log_info('MIL dataset is used')\n else:\n train_tr, test_tr = get_basic_transforms()\n if args.test:\n train_ds = CentriollesDatasetOn(transform=train_tr,\n pos_dir='dataset/mnist/1',\n neg_dir='dataset/mnist/0', inp_size=args.img_size)\n test_ds = CentriollesDatasetOn(transform=test_tr,\n pos_dir='dataset/mnist/1',\n neg_dir='dataset/mnist/0', inp_size=args.img_size, train=False)\n log_info('Test bags dataset is used')\n else:\n train_ds = CentriollesDatasetOn(transform=train_tr,\n pos_dir='dataset/artificial/train_pos/',\n neg_dir='dataset/artificial/train_neg/',\n inp_size=args.img_size, all_data=True)\n test_ds = 
CentriollesDatasetOn(transform=test_tr,\n pos_dir='dataset/artificial/test_pos/',\n neg_dir='dataset/artificial/test_neg/',\n inp_size=args.img_size, all_data=True)\n log_info('ILC dataset is used')\n\n train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0)\n test_dl = DataLoader(test_ds, batch_size=4, shuffle=True, num_workers=0)\n\n log_info('Datasets are initialized!')\n\n # DIRS AND MODEL\n exec(\"model = impl_models.%s\" % (args.model_name))\n\n model_dir = os.path.join('models', args.model_name)\n curent_model_dir = os.path.join(model_dir, args.id)\n log_info('Model will be saved to %s' % (curent_model_dir))\n log_info(' + Number of params: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\n weight_dir = os.path.join(curent_model_dir, 'weights')\n log_info('Weights will be saved to %s' % (weight_dir))\n if not os.path.exists(weight_dir):\n os.mkdir(weight_dir)\n logs_dir = os.path.join(curent_model_dir, 'logs')\n if not os.path.exists(logs_dir):\n os.mkdir(logs_dir)\n log_info('Logs will be saved to %s' % (logs_dir))\n\n # Build trainer\n logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),\n log_images_every=(np.inf, 'epochs'))\n\n def log_histogram(self, tag, values, bins=1000):\n pass\n logger.log_histogram = log_histogram\n\n trainer = Trainer(model)\\\n .build_criterion('CrossEntropyLoss') \\\n .build_metric('CategoricalError') \\\n .build_optimizer('Adam') \\\n .validate_every((2, 'epochs')) \\\n .save_every((5, 'epochs')) \\\n .save_to_directory(weight_dir) \\\n .set_max_num_epochs(10000) \\\n .build_logger(logger, log_directory=logs_dir) \\\n .register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=0.9,\n monitor_while='validating',\n consider_improvement_with_respect_to='best'))\n\n # Bind loaders\n trainer \\\n .bind_loader('train', train_dl) \\\n .bind_loader('validate', test_dl)\n\n if torch.cuda.is_available():\n trainer.cuda()\n\n trainer.fit()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from py.test import raises from ..lazymap import LazyMap def test_lazymap(): data = list(range(10)) lm = LazyMap(data, lambda x: 2 * x) assert len(lm) == 10 assert lm[1] == 2 assert isinstance(lm[1:4], LazyMap) assert lm.append == data.append assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>' def test_lazymap_iter(): data = list(range(2)) lm = LazyMap(data, lambda x: 2 * x) iter_lm = iter(lm) assert iter_lm.next() == 0 assert iter_lm.next() == 2 with raises(StopIteration): iter_lm.next()
normal
{ "blob_id": "3e7d80fdd1adb570934e4b252bc25d5746b4c68e", "index": 3912, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\ndef test_lazymap_iter():\n data = list(range(2))\n lm = LazyMap(data, lambda x: 2 * x)\n iter_lm = iter(lm)\n assert iter_lm.next() == 0\n assert iter_lm.next() == 2\n with raises(StopIteration):\n iter_lm.next()\n", "step-4": "from py.test import raises\nfrom ..lazymap import LazyMap\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\ndef test_lazymap_iter():\n data = list(range(2))\n lm = LazyMap(data, lambda x: 2 * x)\n iter_lm = iter(lm)\n assert iter_lm.next() == 0\n assert iter_lm.next() == 2\n with raises(StopIteration):\n iter_lm.next()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Stanley H.I. Lio # [email protected] # All Rights Reserved. 2018 import logging, time, sys from serial import Serial from . import aanderaa_3835 from . import aanderaa_4330f from . import aanderaa_4531d from . import aanderaa_4319a logger = logging.getLogger(__name__) # works with 3835 (DO), 4330F (DO), 4531D (DO), and 4319A (EC) def aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835, aanderaa_4319a.parse_4319a]): logger.debug('aanderaa_read_universal()') with Serial(port, 9600, timeout=2) as ser: r = None for _ in range(max_retry): ser.flush() ser.write(b'\r\ndo sample\r\n') try: line = ser.readline() line = filter(lambda c: c <= 0x7f, line) line = bytearray(filter(lambda c: c not in ['\x11', '\x13'], line)) # the control characters line = line.decode().strip() #print([ord(c) for c in line]) if len(line) <= 0: logger.debug('(no response)') continue elif any([c in line for c in '#*']): logger.debug('(junk)') logger.debug(line) logger.debug([ord(c) for c in line]) continue elif 'SYNTAX ERROR' in line: logger.debug('(SYNTAX ERROR)') logger.debug([ord(c) for c in line]) continue else: for f in parsers: logging.debug(f) try: r = f(line) if r is not None and len(r) > 0: break except ValueError: logger.debug('(valueerror)') else: time.sleep(1.29) ser.flush() except UnicodeDecodeError: logger.exception('UnicodeDecodeError: {}'.format(line)) ser.flush() if r is not None and len(r.keys()): break time.sleep(1.17) ser.flush() return r if '__main__' == __name__: logger.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) DEFAULT_PORT = '/dev/ttyS1' PORT = input('PORT=? (default={})'.format(DEFAULT_PORT)).strip() if len(PORT) <= 0: PORT = DEFAULT_PORT while True: try: print(aanderaa_read_universal(PORT)) except KeyboardInterrupt: print('user interrupted') break
normal
{ "blob_id": "c52ad4040c14471319939605c400ff4d4ad982a7", "index": 5213, "step-1": "<mask token>\n\n\ndef aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.\n parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835,\n aanderaa_4319a.parse_4319a]):\n logger.debug('aanderaa_read_universal()')\n with Serial(port, 9600, timeout=2) as ser:\n r = None\n for _ in range(max_retry):\n ser.flush()\n ser.write(b'\\r\\ndo sample\\r\\n')\n try:\n line = ser.readline()\n line = filter(lambda c: c <= 127, line)\n line = bytearray(filter(lambda c: c not in ['\\x11', '\\x13'],\n line))\n line = line.decode().strip()\n if len(line) <= 0:\n logger.debug('(no response)')\n continue\n elif any([(c in line) for c in '#*']):\n logger.debug('(junk)')\n logger.debug(line)\n logger.debug([ord(c) for c in line])\n continue\n elif 'SYNTAX ERROR' in line:\n logger.debug('(SYNTAX ERROR)')\n logger.debug([ord(c) for c in line])\n continue\n else:\n for f in parsers:\n logging.debug(f)\n try:\n r = f(line)\n if r is not None and len(r) > 0:\n break\n except ValueError:\n logger.debug('(valueerror)')\n else:\n time.sleep(1.29)\n ser.flush()\n except UnicodeDecodeError:\n logger.exception('UnicodeDecodeError: {}'.format(line))\n ser.flush()\n if r is not None and len(r.keys()):\n break\n time.sleep(1.17)\n ser.flush()\n return r\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.\n parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835,\n aanderaa_4319a.parse_4319a]):\n logger.debug('aanderaa_read_universal()')\n with Serial(port, 9600, timeout=2) as ser:\n r = None\n for _ in range(max_retry):\n ser.flush()\n ser.write(b'\\r\\ndo sample\\r\\n')\n try:\n line = ser.readline()\n line = filter(lambda c: c <= 127, line)\n line = bytearray(filter(lambda c: c not in ['\\x11', '\\x13'],\n line))\n line = line.decode().strip()\n if len(line) <= 0:\n logger.debug('(no response)')\n continue\n elif any([(c in line) for c in '#*']):\n logger.debug('(junk)')\n logger.debug(line)\n logger.debug([ord(c) for c in line])\n continue\n elif 'SYNTAX ERROR' in line:\n logger.debug('(SYNTAX ERROR)')\n logger.debug([ord(c) for c in line])\n continue\n else:\n for f in parsers:\n logging.debug(f)\n try:\n r = f(line)\n if r is not None and len(r) > 0:\n break\n except ValueError:\n logger.debug('(valueerror)')\n else:\n time.sleep(1.29)\n ser.flush()\n except UnicodeDecodeError:\n logger.exception('UnicodeDecodeError: {}'.format(line))\n ser.flush()\n if r is not None and len(r.keys()):\n break\n time.sleep(1.17)\n ser.flush()\n return r\n\n\nif '__main__' == __name__:\n logger.setLevel(logging.INFO)\n logging.basicConfig(level=logging.INFO)\n DEFAULT_PORT = '/dev/ttyS1'\n PORT = input('PORT=? 
(default={})'.format(DEFAULT_PORT)).strip()\n if len(PORT) <= 0:\n PORT = DEFAULT_PORT\n while True:\n try:\n print(aanderaa_read_universal(PORT))\n except KeyboardInterrupt:\n print('user interrupted')\n break\n", "step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\ndef aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.\n parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835,\n aanderaa_4319a.parse_4319a]):\n logger.debug('aanderaa_read_universal()')\n with Serial(port, 9600, timeout=2) as ser:\n r = None\n for _ in range(max_retry):\n ser.flush()\n ser.write(b'\\r\\ndo sample\\r\\n')\n try:\n line = ser.readline()\n line = filter(lambda c: c <= 127, line)\n line = bytearray(filter(lambda c: c not in ['\\x11', '\\x13'],\n line))\n line = line.decode().strip()\n if len(line) <= 0:\n logger.debug('(no response)')\n continue\n elif any([(c in line) for c in '#*']):\n logger.debug('(junk)')\n logger.debug(line)\n logger.debug([ord(c) for c in line])\n continue\n elif 'SYNTAX ERROR' in line:\n logger.debug('(SYNTAX ERROR)')\n logger.debug([ord(c) for c in line])\n continue\n else:\n for f in parsers:\n logging.debug(f)\n try:\n r = f(line)\n if r is not None and len(r) > 0:\n break\n except ValueError:\n logger.debug('(valueerror)')\n else:\n time.sleep(1.29)\n ser.flush()\n except UnicodeDecodeError:\n logger.exception('UnicodeDecodeError: {}'.format(line))\n ser.flush()\n if r is not None and len(r.keys()):\n break\n time.sleep(1.17)\n ser.flush()\n return r\n\n\nif '__main__' == __name__:\n logger.setLevel(logging.INFO)\n logging.basicConfig(level=logging.INFO)\n DEFAULT_PORT = '/dev/ttyS1'\n PORT = input('PORT=? (default={})'.format(DEFAULT_PORT)).strip()\n if len(PORT) <= 0:\n PORT = DEFAULT_PORT\n while True:\n try:\n print(aanderaa_read_universal(PORT))\n except KeyboardInterrupt:\n print('user interrupted')\n break\n", "step-4": "import logging, time, sys\nfrom serial import Serial\nfrom . import aanderaa_3835\nfrom . import aanderaa_4330f\nfrom . import aanderaa_4531d\nfrom . import aanderaa_4319a\nlogger = logging.getLogger(__name__)\n\n\ndef aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.\n parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835,\n aanderaa_4319a.parse_4319a]):\n logger.debug('aanderaa_read_universal()')\n with Serial(port, 9600, timeout=2) as ser:\n r = None\n for _ in range(max_retry):\n ser.flush()\n ser.write(b'\\r\\ndo sample\\r\\n')\n try:\n line = ser.readline()\n line = filter(lambda c: c <= 127, line)\n line = bytearray(filter(lambda c: c not in ['\\x11', '\\x13'],\n line))\n line = line.decode().strip()\n if len(line) <= 0:\n logger.debug('(no response)')\n continue\n elif any([(c in line) for c in '#*']):\n logger.debug('(junk)')\n logger.debug(line)\n logger.debug([ord(c) for c in line])\n continue\n elif 'SYNTAX ERROR' in line:\n logger.debug('(SYNTAX ERROR)')\n logger.debug([ord(c) for c in line])\n continue\n else:\n for f in parsers:\n logging.debug(f)\n try:\n r = f(line)\n if r is not None and len(r) > 0:\n break\n except ValueError:\n logger.debug('(valueerror)')\n else:\n time.sleep(1.29)\n ser.flush()\n except UnicodeDecodeError:\n logger.exception('UnicodeDecodeError: {}'.format(line))\n ser.flush()\n if r is not None and len(r.keys()):\n break\n time.sleep(1.17)\n ser.flush()\n return r\n\n\nif '__main__' == __name__:\n logger.setLevel(logging.INFO)\n logging.basicConfig(level=logging.INFO)\n DEFAULT_PORT = '/dev/ttyS1'\n PORT = input('PORT=? 
(default={})'.format(DEFAULT_PORT)).strip()\n if len(PORT) <= 0:\n PORT = DEFAULT_PORT\n while True:\n try:\n print(aanderaa_read_universal(PORT))\n except KeyboardInterrupt:\n print('user interrupted')\n break\n", "step-5": "# Stanley H.I. Lio\n# [email protected]\n# All Rights Reserved. 2018\nimport logging, time, sys\nfrom serial import Serial\nfrom . import aanderaa_3835\nfrom . import aanderaa_4330f\nfrom . import aanderaa_4531d\nfrom . import aanderaa_4319a\n\n\nlogger = logging.getLogger(__name__)\n\n\n# works with 3835 (DO), 4330F (DO), 4531D (DO), and 4319A (EC)\ndef aanderaa_read_universal(port, max_retry=3, parsers=[aanderaa_4531d.parse_4531d, aanderaa_4330f.parse_4330f, aanderaa_3835.parse_3835, aanderaa_4319a.parse_4319a]):\n logger.debug('aanderaa_read_universal()')\n \n with Serial(port, 9600, timeout=2) as ser:\n\n r = None\n for _ in range(max_retry):\n\n ser.flush()\n ser.write(b'\\r\\ndo sample\\r\\n')\n try:\n line = ser.readline()\n line = filter(lambda c: c <= 0x7f, line)\n line = bytearray(filter(lambda c: c not in ['\\x11', '\\x13'], line)) # the control characters\n line = line.decode().strip()\n #print([ord(c) for c in line])\n\n if len(line) <= 0:\n logger.debug('(no response)') \n continue\n elif any([c in line for c in '#*']):\n logger.debug('(junk)')\n logger.debug(line)\n logger.debug([ord(c) for c in line])\n continue\n elif 'SYNTAX ERROR' in line:\n logger.debug('(SYNTAX ERROR)')\n logger.debug([ord(c) for c in line])\n continue\n else:\n for f in parsers:\n logging.debug(f)\n try:\n r = f(line)\n if r is not None and len(r) > 0:\n break\n except ValueError:\n logger.debug('(valueerror)')\n else:\n time.sleep(1.29)\n ser.flush()\n\n except UnicodeDecodeError:\n logger.exception('UnicodeDecodeError: {}'.format(line))\n ser.flush()\n\n if r is not None and len(r.keys()):\n break\n\n time.sleep(1.17)\n\n ser.flush()\n return r\n\n\nif '__main__' == __name__:\n\n logger.setLevel(logging.INFO)\n logging.basicConfig(level=logging.INFO)\n\n DEFAULT_PORT = '/dev/ttyS1'\n PORT = input('PORT=? (default={})'.format(DEFAULT_PORT)).strip()\n if len(PORT) <= 0:\n PORT = DEFAULT_PORT\n\n while True:\n try:\n print(aanderaa_read_universal(PORT))\n except KeyboardInterrupt:\n print('user interrupted')\n break\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 10:04:05 2019

@author: cristina
"""

import numpy as np
from itertools import chain
from numpy import linalg as LA
diag = LA.eigh
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 13})
import time

pi = np.pi
exp = np.exp
t1 = time.time()

N = 2000 #number of sites
M = 200 #number of empty sites
m = 1.0 #effective mass
delta =1.35/27211.6 #SC gap
mu = 1.0/27211.6 #chemical potential
mu = 0.0
a = 4.98/0.529 ##lattice constant
phi = pi/2.0#phase of second SC
phi = 0.0

H = np.zeros([2*(2*N + M), 2*(2*N + M)], dtype=complex)
h = np.zeros([2*N + M, 2*N + M, 2, 2], dtype=complex)

factor = 1/(m*a**2) - mu
factor_2 = -1/(2*m*a**2)
hopping = factor_2*10
hopping = 0.0

#diagonal terms
range1_diagonal = range(N)
range2_diagonal = range(N+M, 2*N+M - 1)

for i in range1_diagonal:
    g_i = i

    h[g_i, g_i, 0, 1] = delta
    h[g_i, g_i, 1, 0] = delta

    h[g_i, g_i, 0, 0] = factor
    h[g_i, g_i, 1, 1] = - factor


for i in range2_diagonal:
    g_i = i

    h[g_i, g_i, 0, 1] = delta*exp(1j*phi)
    h[g_i, g_i, 1, 0] = delta*exp(-1j*phi)

    h[g_i, g_i, 0, 0] = factor
    h[g_i, g_i, 1, 1] = - factor

#off - diagonal terms
range1_offdiagonal = range(N - 1)
range2_offdiagonal = range(N+M, 2*N+M - 1)
range_offdiagonal = chain(range1_offdiagonal, range2_offdiagonal)

for i in range_offdiagonal:
    g_i = i
    g_j = i + 1

    h[g_i, g_j, 0, 0] = factor_2
    h[g_i, g_j, 1, 1] = - factor_2

    h[g_j, g_i, 0, 0] = factor_2
    h[g_j, g_i, 1, 1] = - factor_2


#hopping between the 2 Chains
h[N - 1, N + M, 0, 0] = hopping
h[N - 1, N + M, 1, 1] = - hopping

h[N + M, N - 1, 0, 0] = hopping
h[N + M, N - 1, 1, 1] = - hopping

for i in range(2*N + M):
    for j in range(2*N + M):
        for t_i in range(2):
            for t_j in range(2):
                H[(i) * 2 + t_i, (j) * 2 + t_j] = h[i, j, t_i, t_j]

H = np.matrix(H)
T = np.allclose(H, H.getH())###check if Hermitian
print('Is H an Hermitian matrix?', T)

(E, psi) = diag(H)####diagonalize H


####LDOS functions
def LDOS_up(omega, E, u, Damping):
    t = sum ( u**2 / (omega - E + 1j*Damping) )
    tt = -1/pi*np.imag(t)
    return(tt)

def LDOS_down(omega, E, v, Damping):
    t = sum ( v**2 / (omega + E + 1j*Damping) )
    tt = -1/pi*np.imag(t)
    return(tt)


#### u and v components in the Nth atom
u_borde1 = np.zeros(len(E))
v_borde1 = np.zeros(len(E))
I = N - 1

u_borde2 = np.zeros(len(E))
v_borde2 = np.zeros(len(E))
I2 = N + M - 1

u_bulk1 = np.zeros(len(E))
v_bulk1 = np.zeros(len(E))
I3 = int(N/2) - 1

u_bulk2 = np.zeros(len(E))
v_bulk2 = np.zeros(len(E))
I4 = N + M + int(N/2.0) - 1

I = N
for i in range(len(E)):
    u_borde1[i] = psi[2*I-2,i]
    v_borde1[i] = psi[2*I-1,i]

    u_borde2[i] = psi[2*I2-2,i]
    v_borde2[i] = psi[2*I2-1,i]

    u_bulk1[i] = psi[2*I3-2,i]
    v_bulk1[i] = psi[2*I3-1,i]

    u_bulk2[i] = psi[2*I4-2,i]
    v_bulk2[i] = psi[2*I4-1,i]

###calculate LDOS
omega = np.linspace(-4*delta, 4*delta, 2000)#omega vector

LDOS_borde1_up = np.zeros(len(omega))
LDOS_borde1_down = np.zeros(len(omega))

LDOS_borde2_up = np.zeros(len(omega))
LDOS_borde2_down = np.zeros(len(omega))

LDOS_bulk1_up = np.zeros(len(omega))
LDOS_bulk1_down = np.zeros(len(omega))

LDOS_bulk2_up = np.zeros(len(omega))
LDOS_bulk2_down = np.zeros(len(omega))

D = 0.02/27211.6
for i in range(len(omega)):

    LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D)
    LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)

    LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D)
    LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)

    LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D)
    LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)

    LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D)
    LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)


###plot LDOS
plt.figure(1)
plt.plot(omega*27211.6, LDOS_borde1_up + LDOS_borde1_down)
plt.plot(omega*27211.6, LDOS_borde1_up, label = 'up')
plt.plot(omega*27211.6, LDOS_borde1_down, label = 'down')
plt.title('Borde SC 1')
#plt.title('Site %i' %I)
plt.legend()

plt.figure(2)
plt.plot(omega*27211.6, LDOS_borde2_up + LDOS_borde2_down)
plt.plot(omega*27211.6, LDOS_borde2_up, label = 'up')
plt.plot(omega*27211.6, LDOS_borde2_down, label = 'down')
plt.title('Borde SC 2')
#plt.title('Site %i' %I)
plt.legend()

plt.figure(3)
plt.plot(omega*27211.6, LDOS_bulk1_up + LDOS_bulk1_down)
plt.plot(omega*27211.6, LDOS_bulk1_up, label = 'up')
plt.plot(omega*27211.6, LDOS_bulk1_down, label = 'down')
plt.title('Bulk SC 1')
#plt.title('Site %i' %I)
plt.legend()

plt.figure(4)
plt.plot(omega*27211.6, LDOS_bulk2_up + LDOS_bulk2_down)
plt.plot(omega*27211.6, LDOS_bulk2_up, label = 'up')
plt.plot(omega*27211.6, LDOS_bulk2_down, label = 'down')
plt.title('Bulk SC 2')
#plt.title('Site %i' %I)
plt.legend()


t2 = time.time()
print('Program finished after', (t2 - t1)/60.0, 'mins')
normal
{ "blob_id": "f2ad95574b65b4d3e44b85c76f3a0150a3275cec", "index": 2356, "step-1": "<mask token>\n\n\ndef LDOS_up(omega, E, u, Damping):\n t = sum(u ** 2 / (omega - E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\ndef LDOS_down(omega, E, v, Damping):\n t = sum(v ** 2 / (omega + E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\n<mask token>\n", "step-2": "<mask token>\nplt.rcParams.update({'font.size': 13})\n<mask token>\nfor i in range1_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta\n h[g_i, g_i, 1, 0] = delta\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\nfor i in range2_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta * exp(1.0j * phi)\n h[g_i, g_i, 1, 0] = delta * exp(-1.0j * phi)\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\n<mask token>\nfor i in range_offdiagonal:\n g_i = i\n g_j = i + 1\n h[g_i, g_j, 0, 0] = factor_2\n h[g_i, g_j, 1, 1] = -factor_2\n h[g_j, g_i, 0, 0] = factor_2\n h[g_j, g_i, 1, 1] = -factor_2\n<mask token>\nfor i in range(2 * N + M):\n for j in range(2 * N + M):\n for t_i in range(2):\n for t_j in range(2):\n H[i * 2 + t_i, j * 2 + t_j] = h[i, j, t_i, t_j]\n<mask token>\nprint('Is H an Hermitian matrix?', T)\n<mask token>\n\n\ndef LDOS_up(omega, E, u, Damping):\n t = sum(u ** 2 / (omega - E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\ndef LDOS_down(omega, E, v, Damping):\n t = sum(v ** 2 / (omega + E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\n<mask token>\nfor i in range(len(E)):\n u_borde1[i] = psi[2 * I - 2, i]\n v_borde1[i] = psi[2 * I - 1, i]\n u_borde2[i] = psi[2 * I2 - 2, i]\n v_borde2[i] = psi[2 * I2 - 1, i]\n u_bulk1[i] = psi[2 * I3 - 2, i]\n v_bulk1[i] = psi[2 * I3 - 1, i]\n u_bulk2[i] = psi[2 * I4 - 2, i]\n v_bulk2[i] = psi[2 * I4 - 1, i]\n<mask token>\nfor i in range(len(omega)):\n LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D)\n LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)\n LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D)\n LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)\n LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D)\n LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)\n LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D)\n LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)\nplt.figure(1)\nplt.plot(omega * 27211.6, LDOS_borde1_up + LDOS_borde1_down)\nplt.plot(omega * 27211.6, LDOS_borde1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde1_down, label='down')\nplt.title('Borde SC 1')\nplt.legend()\nplt.figure(2)\nplt.plot(omega * 27211.6, LDOS_borde2_up + LDOS_borde2_down)\nplt.plot(omega * 27211.6, LDOS_borde2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde2_down, label='down')\nplt.title('Borde SC 2')\nplt.legend()\nplt.figure(3)\nplt.plot(omega * 27211.6, LDOS_bulk1_up + LDOS_bulk1_down)\nplt.plot(omega * 27211.6, LDOS_bulk1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk1_down, label='down')\nplt.title('Bulk SC 1')\nplt.legend()\nplt.figure(4)\nplt.plot(omega * 27211.6, LDOS_bulk2_up + LDOS_bulk2_down)\nplt.plot(omega * 27211.6, LDOS_bulk2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk2_down, label='down')\nplt.title('Bulk SC 2')\nplt.legend()\n<mask token>\nprint('Program finished after', (t2 - t1) / 60.0, 'mins')\n", "step-3": "<mask token>\ndiag = LA.eigh\n<mask token>\nplt.rcParams.update({'font.size': 13})\n<mask token>\npi = np.pi\nexp = np.exp\nt1 = time.time()\nN = 2000\nM = 200\nm = 1.0\ndelta = 1.35 / 27211.6\nmu = 1.0 / 27211.6\nmu = 0.0\na = 4.98 / 0.529\nphi = pi / 2.0\nphi = 
0.0\nH = np.zeros([2 * (2 * N + M), 2 * (2 * N + M)], dtype=complex)\nh = np.zeros([2 * N + M, 2 * N + M, 2, 2], dtype=complex)\nfactor = 1 / (m * a ** 2) - mu\nfactor_2 = -1 / (2 * m * a ** 2)\nhopping = factor_2 * 10\nhopping = 0.0\nrange1_diagonal = range(N)\nrange2_diagonal = range(N + M, 2 * N + M - 1)\nfor i in range1_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta\n h[g_i, g_i, 1, 0] = delta\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\nfor i in range2_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta * exp(1.0j * phi)\n h[g_i, g_i, 1, 0] = delta * exp(-1.0j * phi)\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\nrange1_offdiagonal = range(N - 1)\nrange2_offdiagonal = range(N + M, 2 * N + M - 1)\nrange_offdiagonal = chain(range1_offdiagonal, range2_offdiagonal)\nfor i in range_offdiagonal:\n g_i = i\n g_j = i + 1\n h[g_i, g_j, 0, 0] = factor_2\n h[g_i, g_j, 1, 1] = -factor_2\n h[g_j, g_i, 0, 0] = factor_2\n h[g_j, g_i, 1, 1] = -factor_2\nh[N - 1, N + M, 0, 0] = hopping\nh[N - 1, N + M, 1, 1] = -hopping\nh[N + M, N - 1, 0, 0] = hopping\nh[N + M, N - 1, 1, 1] = -hopping\nfor i in range(2 * N + M):\n for j in range(2 * N + M):\n for t_i in range(2):\n for t_j in range(2):\n H[i * 2 + t_i, j * 2 + t_j] = h[i, j, t_i, t_j]\nH = np.matrix(H)\nT = np.allclose(H, H.getH())\nprint('Is H an Hermitian matrix?', T)\nE, psi = diag(H)\n\n\ndef LDOS_up(omega, E, u, Damping):\n t = sum(u ** 2 / (omega - E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\ndef LDOS_down(omega, E, v, Damping):\n t = sum(v ** 2 / (omega + E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\nu_borde1 = np.zeros(len(E))\nv_borde1 = np.zeros(len(E))\nI = N - 1\nu_borde2 = np.zeros(len(E))\nv_borde2 = np.zeros(len(E))\nI2 = N + M - 1\nu_bulk1 = np.zeros(len(E))\nv_bulk1 = np.zeros(len(E))\nI3 = int(N / 2) - 1\nu_bulk2 = np.zeros(len(E))\nv_bulk2 = np.zeros(len(E))\nI4 = N + M + int(N / 2.0) - 1\nI = N\nfor i in range(len(E)):\n u_borde1[i] = psi[2 * I - 2, i]\n v_borde1[i] = psi[2 * I - 1, i]\n u_borde2[i] = psi[2 * I2 - 2, i]\n v_borde2[i] = psi[2 * I2 - 1, i]\n u_bulk1[i] = psi[2 * I3 - 2, i]\n v_bulk1[i] = psi[2 * I3 - 1, i]\n u_bulk2[i] = psi[2 * I4 - 2, i]\n v_bulk2[i] = psi[2 * I4 - 1, i]\nomega = np.linspace(-4 * delta, 4 * delta, 2000)\nLDOS_borde1_up = np.zeros(len(omega))\nLDOS_borde1_down = np.zeros(len(omega))\nLDOS_borde2_up = np.zeros(len(omega))\nLDOS_borde2_down = np.zeros(len(omega))\nLDOS_bulk1_up = np.zeros(len(omega))\nLDOS_bulk1_down = np.zeros(len(omega))\nLDOS_bulk2_up = np.zeros(len(omega))\nLDOS_bulk2_down = np.zeros(len(omega))\nD = 0.02 / 27211.6\nfor i in range(len(omega)):\n LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D)\n LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)\n LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D)\n LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)\n LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D)\n LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)\n LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D)\n LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)\nplt.figure(1)\nplt.plot(omega * 27211.6, LDOS_borde1_up + LDOS_borde1_down)\nplt.plot(omega * 27211.6, LDOS_borde1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde1_down, label='down')\nplt.title('Borde SC 1')\nplt.legend()\nplt.figure(2)\nplt.plot(omega * 27211.6, LDOS_borde2_up + LDOS_borde2_down)\nplt.plot(omega * 27211.6, LDOS_borde2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde2_down, label='down')\nplt.title('Borde SC 
2')\nplt.legend()\nplt.figure(3)\nplt.plot(omega * 27211.6, LDOS_bulk1_up + LDOS_bulk1_down)\nplt.plot(omega * 27211.6, LDOS_bulk1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk1_down, label='down')\nplt.title('Bulk SC 1')\nplt.legend()\nplt.figure(4)\nplt.plot(omega * 27211.6, LDOS_bulk2_up + LDOS_bulk2_down)\nplt.plot(omega * 27211.6, LDOS_bulk2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk2_down, label='down')\nplt.title('Bulk SC 2')\nplt.legend()\nt2 = time.time()\nprint('Program finished after', (t2 - t1) / 60.0, 'mins')\n", "step-4": "<mask token>\nimport numpy as np\nfrom itertools import chain\nfrom numpy import linalg as LA\ndiag = LA.eigh\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 13})\nimport time\npi = np.pi\nexp = np.exp\nt1 = time.time()\nN = 2000\nM = 200\nm = 1.0\ndelta = 1.35 / 27211.6\nmu = 1.0 / 27211.6\nmu = 0.0\na = 4.98 / 0.529\nphi = pi / 2.0\nphi = 0.0\nH = np.zeros([2 * (2 * N + M), 2 * (2 * N + M)], dtype=complex)\nh = np.zeros([2 * N + M, 2 * N + M, 2, 2], dtype=complex)\nfactor = 1 / (m * a ** 2) - mu\nfactor_2 = -1 / (2 * m * a ** 2)\nhopping = factor_2 * 10\nhopping = 0.0\nrange1_diagonal = range(N)\nrange2_diagonal = range(N + M, 2 * N + M - 1)\nfor i in range1_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta\n h[g_i, g_i, 1, 0] = delta\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\nfor i in range2_diagonal:\n g_i = i\n h[g_i, g_i, 0, 1] = delta * exp(1.0j * phi)\n h[g_i, g_i, 1, 0] = delta * exp(-1.0j * phi)\n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = -factor\nrange1_offdiagonal = range(N - 1)\nrange2_offdiagonal = range(N + M, 2 * N + M - 1)\nrange_offdiagonal = chain(range1_offdiagonal, range2_offdiagonal)\nfor i in range_offdiagonal:\n g_i = i\n g_j = i + 1\n h[g_i, g_j, 0, 0] = factor_2\n h[g_i, g_j, 1, 1] = -factor_2\n h[g_j, g_i, 0, 0] = factor_2\n h[g_j, g_i, 1, 1] = -factor_2\nh[N - 1, N + M, 0, 0] = hopping\nh[N - 1, N + M, 1, 1] = -hopping\nh[N + M, N - 1, 0, 0] = hopping\nh[N + M, N - 1, 1, 1] = -hopping\nfor i in range(2 * N + M):\n for j in range(2 * N + M):\n for t_i in range(2):\n for t_j in range(2):\n H[i * 2 + t_i, j * 2 + t_j] = h[i, j, t_i, t_j]\nH = np.matrix(H)\nT = np.allclose(H, H.getH())\nprint('Is H an Hermitian matrix?', T)\nE, psi = diag(H)\n\n\ndef LDOS_up(omega, E, u, Damping):\n t = sum(u ** 2 / (omega - E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\ndef LDOS_down(omega, E, v, Damping):\n t = sum(v ** 2 / (omega + E + 1.0j * Damping))\n tt = -1 / pi * np.imag(t)\n return tt\n\n\nu_borde1 = np.zeros(len(E))\nv_borde1 = np.zeros(len(E))\nI = N - 1\nu_borde2 = np.zeros(len(E))\nv_borde2 = np.zeros(len(E))\nI2 = N + M - 1\nu_bulk1 = np.zeros(len(E))\nv_bulk1 = np.zeros(len(E))\nI3 = int(N / 2) - 1\nu_bulk2 = np.zeros(len(E))\nv_bulk2 = np.zeros(len(E))\nI4 = N + M + int(N / 2.0) - 1\nI = N\nfor i in range(len(E)):\n u_borde1[i] = psi[2 * I - 2, i]\n v_borde1[i] = psi[2 * I - 1, i]\n u_borde2[i] = psi[2 * I2 - 2, i]\n v_borde2[i] = psi[2 * I2 - 1, i]\n u_bulk1[i] = psi[2 * I3 - 2, i]\n v_bulk1[i] = psi[2 * I3 - 1, i]\n u_bulk2[i] = psi[2 * I4 - 2, i]\n v_bulk2[i] = psi[2 * I4 - 1, i]\nomega = np.linspace(-4 * delta, 4 * delta, 2000)\nLDOS_borde1_up = np.zeros(len(omega))\nLDOS_borde1_down = np.zeros(len(omega))\nLDOS_borde2_up = np.zeros(len(omega))\nLDOS_borde2_down = np.zeros(len(omega))\nLDOS_bulk1_up = np.zeros(len(omega))\nLDOS_bulk1_down = np.zeros(len(omega))\nLDOS_bulk2_up = np.zeros(len(omega))\nLDOS_bulk2_down = np.zeros(len(omega))\nD = 0.02 / 
27211.6\nfor i in range(len(omega)):\n LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D)\n LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)\n LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D)\n LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)\n LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D)\n LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)\n LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D)\n LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)\nplt.figure(1)\nplt.plot(omega * 27211.6, LDOS_borde1_up + LDOS_borde1_down)\nplt.plot(omega * 27211.6, LDOS_borde1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde1_down, label='down')\nplt.title('Borde SC 1')\nplt.legend()\nplt.figure(2)\nplt.plot(omega * 27211.6, LDOS_borde2_up + LDOS_borde2_down)\nplt.plot(omega * 27211.6, LDOS_borde2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_borde2_down, label='down')\nplt.title('Borde SC 2')\nplt.legend()\nplt.figure(3)\nplt.plot(omega * 27211.6, LDOS_bulk1_up + LDOS_bulk1_down)\nplt.plot(omega * 27211.6, LDOS_bulk1_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk1_down, label='down')\nplt.title('Bulk SC 1')\nplt.legend()\nplt.figure(4)\nplt.plot(omega * 27211.6, LDOS_bulk2_up + LDOS_bulk2_down)\nplt.plot(omega * 27211.6, LDOS_bulk2_up, label='up')\nplt.plot(omega * 27211.6, LDOS_bulk2_down, label='down')\nplt.title('Bulk SC 2')\nplt.legend()\nt2 = time.time()\nprint('Program finished after', (t2 - t1) / 60.0, 'mins')\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 10:04:05 2019\n\n@author: cristina\n\"\"\"\n\nimport numpy as np\nfrom itertools import chain\nfrom numpy import linalg as LA\ndiag = LA.eigh\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 13})\nimport time\n\npi = np.pi\nexp = np.exp\nt1 = time.time()\n\nN = 2000 #number of sites\nM = 200 #number of empty sites\nm = 1.0 #effective mass\ndelta =1.35/27211.6 #SC gap\nmu = 1.0/27211.6 #chemical potential\nmu = 0.0\na = 4.98/0.529 ##lattice constant\nphi = pi/2.0#phase of second SC\nphi = 0.0\n\nH = np.zeros([2*(2*N + M), 2*(2*N + M)], dtype=complex)\nh = np.zeros([2*N + M, 2*N + M, 2, 2], dtype=complex)\n\nfactor = 1/(m*a**2) - mu\nfactor_2 = -1/(2*m*a**2)\nhopping = factor_2*10\nhopping = 0.0\n\n#diagonal terms\nrange1_diagonal = range(N)\nrange2_diagonal = range(N+M, 2*N+M - 1)\n\nfor i in range1_diagonal:\n g_i = i\n \n h[g_i, g_i, 0, 1] = delta\n h[g_i, g_i, 1, 0] = delta \n \n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = - factor\n \n \nfor i in range2_diagonal:\n g_i = i\n \n h[g_i, g_i, 0, 1] = delta*exp(1j*phi)\n h[g_i, g_i, 1, 0] = delta*exp(-1j*phi) \n \n h[g_i, g_i, 0, 0] = factor\n h[g_i, g_i, 1, 1] = - factor\n\n#off - diagonal terms\nrange1_offdiagonal = range(N - 1)\nrange2_offdiagonal = range(N+M, 2*N+M - 1)\nrange_offdiagonal = chain(range1_offdiagonal, range2_offdiagonal)\n\nfor i in range_offdiagonal:\n g_i = i\n g_j = i + 1\n \n h[g_i, g_j, 0, 0] = factor_2\n h[g_i, g_j, 1, 1] = - factor_2 \n \n h[g_j, g_i, 0, 0] = factor_2\n h[g_j, g_i, 1, 1] = - factor_2\n \n\n#hopping between the 2 Chains\nh[N - 1, N + M, 0, 0] = hopping\nh[N - 1, N + M, 1, 1] = - hopping\n\nh[N + M, N - 1, 0, 0] = hopping\nh[N + M, N - 1, 1, 1] = - hopping\n\nfor i in range(2*N + M):\n for j in range(2*N + M):\n for t_i in range(2):\n for t_j in range(2):\n H[(i) * 2 + t_i, (j) * 2 + t_j] = h[i, j, t_i, t_j]\n \nH = np.matrix(H) \nT = np.allclose(H, H.getH())###check if Hermitian\nprint('Is H an Hermitian matrix?', T)\n\n(E, psi) = 
diag(H)####diagonalize H\n\n\n####LDOS functions\ndef LDOS_up(omega, E, u, Damping):\n t = sum ( u**2 / (omega - E + 1j*Damping) )\n tt = -1/pi*np.imag(t)\n return(tt)\n \ndef LDOS_down(omega, E, v, Damping):\n t = sum ( v**2 / (omega + E + 1j*Damping) )\n tt = -1/pi*np.imag(t)\n return(tt)\n\n\n\n#### u and v components in the Nth atom\nu_borde1 = np.zeros(len(E))\nv_borde1 = np.zeros(len(E))\nI = N - 1\n\nu_borde2 = np.zeros(len(E))\nv_borde2 = np.zeros(len(E))\nI2 = N + M - 1\n\nu_bulk1 = np.zeros(len(E))\nv_bulk1 = np.zeros(len(E))\nI3 = int(N/2) - 1\n\nu_bulk2 = np.zeros(len(E))\nv_bulk2 = np.zeros(len(E))\nI4 = N + M + int(N/2.0) - 1\n\nI = N \nfor i in range(len(E)):\n u_borde1[i] = psi[2*I-2,i]\n v_borde1[i] = psi[2*I-1,i]\n \n u_borde2[i] = psi[2*I2-2,i]\n v_borde2[i] = psi[2*I2-1,i]\n \n u_bulk1[i] = psi[2*I3-2,i]\n v_bulk1[i] = psi[2*I3-1,i]\n \n u_bulk2[i] = psi[2*I4-2,i]\n v_bulk2[i] = psi[2*I4-1,i]\n\n###calculate LDOS\nomega = np.linspace(-4*delta, 4*delta, 2000)#omega vector \n\nLDOS_borde1_up = np.zeros(len(omega))\nLDOS_borde1_down = np.zeros(len(omega))\n\nLDOS_borde2_up = np.zeros(len(omega))\nLDOS_borde2_down = np.zeros(len(omega))\n\nLDOS_bulk1_up = np.zeros(len(omega))\nLDOS_bulk1_down = np.zeros(len(omega))\n\nLDOS_bulk2_up = np.zeros(len(omega))\nLDOS_bulk2_down = np.zeros(len(omega))\n\nD = 0.02/27211.6\nfor i in range(len(omega)):\n\n LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D) \n LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)\n \n LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D) \n LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)\n \n LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D) \n LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)\n \n LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D) \n LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)\n\n\n###plot LDOS \nplt.figure(1)\nplt.plot(omega*27211.6, LDOS_borde1_up + LDOS_borde1_down) \nplt.plot(omega*27211.6, LDOS_borde1_up, label = 'up') \nplt.plot(omega*27211.6, LDOS_borde1_down, label = 'down')\nplt.title('Borde SC 1')\n#plt.title('Site %i' %I) \nplt.legend() \n\nplt.figure(2)\nplt.plot(omega*27211.6, LDOS_borde2_up + LDOS_borde2_down) \nplt.plot(omega*27211.6, LDOS_borde2_up, label = 'up') \nplt.plot(omega*27211.6, LDOS_borde2_down, label = 'down')\nplt.title('Borde SC 2')\n#plt.title('Site %i' %I) \nplt.legend() \n\nplt.figure(3)\nplt.plot(omega*27211.6, LDOS_bulk1_up + LDOS_bulk1_down) \nplt.plot(omega*27211.6, LDOS_bulk1_up, label = 'up') \nplt.plot(omega*27211.6, LDOS_bulk1_down, label = 'down')\nplt.title('Bulk SC 1')\n#plt.title('Site %i' %I) \nplt.legend()\n\nplt.figure(4)\nplt.plot(omega*27211.6, LDOS_bulk2_up + LDOS_bulk2_down) \nplt.plot(omega*27211.6, LDOS_bulk2_up, label = 'up') \nplt.plot(omega*27211.6, LDOS_bulk2_down, label = 'down')\nplt.title('Bulk SC 2')\n#plt.title('Site %i' %I) \nplt.legend() \n\n\n\nt2 = time.time()\nprint('Program finished after', (t2 - t1)/60.0, 'mins')\n\n\n\n\n\n\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
#!/usr/bin/env python

from __future__ import division

import sys
import math
logs = sys.stderr
from collections import defaultdict

import time
from mytime import Mytime

import gflags as flags
FLAGS=flags.FLAGS

flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")

def new_vector():
    return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda

class Model(object):
    '''templates and weights.'''

##    __slots__ = "templates", "weights", "list_templates", "freq_templates"

    names = ["SHIFT", "LEFT", "RIGHT"]
    indent = " " * 4
    eval_module = None # by default, use my handwritten static_eval()

    def __init__(self, weightstr):

        self.knowns = set()
        self.unk = FLAGS.unk
        self.unktag = FLAGS.unktag
        self.unkdel = FLAGS.unkdel
        assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"

        if FLAGS.svector: # now it is known
            global svector
            try:
                svector = __import__("svector")
                print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
            except:
                print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
                FLAGS.svector = False # important

        self.templates = {} # mapping from "s0t-q0t" to the eval expression
        self.list_templates = [] # ordered list of template keys "s0t-q0t"
        self.freq_templates = defaultdict(int)
        self.weights = new_vector() #Vector()

        self.read_weights(weightstr)
##        self.featurenames = set(self.weights.iterkeys())

        if FLAGS.featstat:
            self.print_templates()

    def count_knowns_from_train(self, trainfile, devfile):
        '''used in training'''

        print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
        stime = time.time()

        words = defaultdict(int)
        for i, line in enumerate(open(trainfile)):
            for word in line.split():
                word = word.strip("()").rsplit("/", 1)[0]
                words[word] += 1

        if FLAGS.debug_wordfreq:
            devunk1 = set()
            devunk0 = set()
            for line in open(devfile):
                for word in line.split():
                    word = word.strip("()").rsplit("/", 1)[0]
                    if words[word] <= self.unk and words[word] > 0:
                        devunk1.add(word)
                    if words[word] == 0:
                        devunk0.add(word)

            print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
            print >> logs
            print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))

##            freqs = defaultdict(list)
##            for word, freq in words.items():
##                freqs[freq].append(word)

##            for freq in sorted(freqs, reverse=True):
##                print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
##            print >> logs

        self.knowns = set()
        for word, freq in words.items():
            if freq > self.unk:
                self.knowns.add(word)

        print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
              (i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
##        print >> logs, " ".join(sorted(self.knowns))

    def add_template(self, s, freq=1):
        ## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
        symbols = s.split("-") # static part: s0w-s0t
        if s not in self.templates:
            tmp = '"%s=%s" %% (%s)' % (s, \
                                       "|".join(["%s"] * len(symbols)), \
                                       ", ".join(symbols))

            self.templates[s] = compile(tmp, "2", "eval")

            self.list_templates.append((s, tmp)) # in order

        self.freq_templates[s] += int(freq)

    def print_autoevals(self):

        tfilename = str(int(time.time()))
        templatefile = open("/tmp/%s.py" % tfilename, "wt")

        print >> templatefile, "#generated by model.py"
        print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
        print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
        print >> templatefile, "%sreturn [" % Model.indent

        for s, e in self.list_templates:
            print >> templatefile, "%s%s," % (Model.indent * 2, e)

        print >> templatefile, "%s]" % (Model.indent * 2)
        templatefile.close()

        if FLAGS.autoeval:
            sys.path.append('/tmp/')
            print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
            # to be used in newstate
            Model.eval_module = __import__(tfilename)
        else:
            Model.eval_module = Model

    def print_templates(self, f=logs):
        print >> f, ">>> %d templates in total:" % len(self.templates)
        print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
                               for x, _ in self.list_templates])
        print >> f, "---"

    def read_templates(self, filename):

        ## try interpreting it as a filename, if failed, then as a string
        try:
            f = open(filename)
            print >> logs, "reading templates from %s" % filename,
            for x in f:
                if x[:3] == "---":
                    break
                if x[:3] == ">>>":
                    continue
                try:
                    s, freq = x.split()
                except:
                    s, freq = x, 1
                self.add_template(s, freq)

        except:
            ## from argv string rather than file
            for x in filename.split():
                self.add_template(x)
            f = None

        print >> logs, "%d feature templates read." % len(self.templates)

        return f

    def read_weights(self, filename, infertemplates=False):
        '''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''

        infile = self.read_templates(filename)

        infertemplates = len(self.templates) <= 1
        if infertemplates:
            print >> logs, "will infer templates from weights..."

        mytime = Mytime()
        i = 0
        if infile is not None:
            print >> logs, "reading feature weights from %s\t" % filename,
            for i, line in enumerate(infile, 1):
                if i % 200000 == 0:
                    print >> logs, "%d lines read..." % i,

                if line[0] == " ":
                    # TODO: separate known words line (last line)
                    self.knowns = set(line.split())
                    print >> logs, "\n%d known words read." % len(self.knowns)
                    self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
                    break

                feat, weight = line.split()
                self.weights[feat] = float(weight)

                if infertemplates:
                    self.add_template(feat.split("=", 1)[0], 1) ## one occurrence

        print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
              (len(self.weights), i, mytime.period())

        self.print_autoevals()

    def make_feats(self, state):
        '''returns a *list* of feature templates for state.'''

        fv = new_vector() #Vector()
        top = state.top()
        topnext = state.top(1)
        top3rd = state.top(2)
        qhead = state.qhead()
        qnext = state.qhead(1)

        ## this part is manual; their combinations are automatic
        s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
        s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
        s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")

        q0 = qhead if qhead is not None else ("</s>", "</s>")
        q1 = qnext if qnext is not None else ("</s>", "</s>")

        s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
        s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
        s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
        s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"

        ## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
        return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys

    def write(self, filename="-", weights=None):

        if weights is None:
            weights = self.weights

        if filename == "-":
            outfile = sys.stdout
            filename = "STDOUT" # careful overriding
        else:
            outfile = open(filename, "wt")

        self.print_templates(outfile)

        mytime = Mytime()

        nonzero = 0
        print >> logs, "sorting %d features..." % len(weights),
        for i, f in enumerate(sorted(weights), 1):
            if i == 1: # sorting done
                print >> logs, "done in %.2lf seconds." % mytime.period()
                print >> logs, "writing features to %s..." % filename

            v = weights[f]
            if math.fabs(v) > 1e-3:
                print >> outfile, "%s\t%.5lf" % (f, v)
                nonzero += 1

        if self.unk > 0: # print known words
            print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark

        print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
              (nonzero, mytime.period()) ## nonzero != i

    @staticmethod
    def trim(fv):
        for f in fv:
            if math.fabs(fv[f]) < 1e-3:
                del fv[f]
        return fv

    @staticmethod
    def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
        return ["q0t=%s" % (q0t),
                "q0w-q0t=%s|%s" % (q0w, q0t),
                "q0w=%s" % (q0w),
                "s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
                "s0t-q0t=%s|%s" % (s0t, q0t),
                "s0t-s1t=%s|%s" % (s0t, s1t),
                "s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
                "s0t=%s" % (s0t),
                "s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
                "s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
                "s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
                "s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
                "s0w-s0t=%s|%s" % (s0w, s0t),
                "s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
                "s0w-s1w=%s|%s" % (s0w, s1w),
                "s0w=%s" % (s0w),
                "s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
                "s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
                "s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
                "s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
                "s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
                "s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
                "s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
                "s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
                "s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
                "s1t=%s" % (s1t),
                "s1w-s1t=%s|%s" % (s1w, s1t),
                "s1w=%s" % (s1w),
                "s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]

    def prune(self, filenames):
        '''prune features from word/tag lines'''

        print >> logs, "pruning features using %s..." % filenames,

        fullset = set()
        for filename in filenames.split():
            for l in open(filename):
                for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
                    fullset.add(w)
                    fullset.add(t)

        print >> logs, "collected %d uniq words & tags..." % (len(fullset)),

        new = new_vector() # Vector()
        for f in self.weights:

            stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
            for s in stuff:
                if s not in fullset:
                    break
            else:
                new[f] = self.weights[f]

        print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
        self.weights = new

    def sparsify(self, z=1):
        '''duchi et al., 2008'''


if __name__ == "__main__":

    flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")

    try:
        argv = FLAGS(sys.argv)
        if FLAGS.weights is None:
            raise flags.FlagsError("must specify weights by -w ...")
    except flags.FlagsError, e:
        print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)

    FLAGS.featstat = True

    model = Model(FLAGS.weights) #.model, FLAGS.weights)

    if FLAGS.prune:
        model.prune(FLAGS.prune)

    if FLAGS.outputweights:
        model.write(FLAGS.outputweights)
normal
{ "blob_id": "e5fd0fc13a39444a934eea3bd24056073d28eff2", "index": 9869, "step-1": "#!/usr/bin/env python\n\nfrom __future__ import division\n\nimport sys\nimport math\nlogs = sys.stderr\nfrom collections import defaultdict\n\nimport time\nfrom mytime import Mytime\n\nimport gflags as flags\nFLAGS=flags.FLAGS\n\nflags.DEFINE_string(\"weights\", None, \"weights file (feature instances and weights)\", short_name=\"w\")\nflags.DEFINE_boolean(\"svector\", False, \"use David's svector (Cython) instead of Pythonic defaultdict\")\nflags.DEFINE_boolean(\"featstat\", False, \"print feature stats\")\nflags.DEFINE_string(\"outputweights\", None, \"write weights (in short-hand format); - for STDOUT\", short_name=\"ow\")\nflags.DEFINE_boolean(\"autoeval\", True, \"use automatically generated eval module\")\nflags.DEFINE_integer(\"unk\", 0, \"treat words with count less than COUNT as UNKNOWN\")\nflags.DEFINE_boolean(\"debug_wordfreq\", False, \"print word freq info\")\nflags.DEFINE_boolean(\"unktag\", False, \"use POS tags for unknown words\")\nflags.DEFINE_boolean(\"unkdel\", False, \"remove features involving unks\")\nflags.DEFINE_boolean(\"s2\", True, \"use s2t features\")\n \ndef new_vector():\n return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda \n\nclass Model(object):\n '''templates and weights.'''\n\n## __slots__ = \"templates\", \"weights\", \"list_templates\", \"freq_templates\"\n\n names = [\"SHIFT\", \"LEFT\", \"RIGHT\"]\n indent = \" \" * 4\n eval_module = None # by default, use my handwritten static_eval()\n \n def __init__(self, weightstr):\n\n self.knowns = set()\n self.unk = FLAGS.unk\n self.unktag = FLAGS.unktag\n self.unkdel = FLAGS.unkdel\n assert not (self.unkdel and self.unktag), \"UNKDEL and UNKTAG can't be both true\"\n\n if FLAGS.svector: # now it is known\n global svector\n try:\n svector = __import__(\"svector\")\n print >> logs, \"WARNING: using David's svector (Cython). Performance might suffer.\"\n except:\n print >> logs, \"WARNING: failed to import svector. 
using Pythonic defaultdict instead (actually faster).\"\n FLAGS.svector = False # important\n\n self.templates = {} # mapping from \"s0t-q0t\" to the eval expression\n self.list_templates = [] # ordered list of template keys \"s0t-q0t\"\n self.freq_templates = defaultdict(int)\n self.weights = new_vector() #Vector()\n\n self.read_weights(weightstr)\n## self.featurenames = set(self.weights.iterkeys())\n\n if FLAGS.featstat:\n self.print_templates()\n\n def count_knowns_from_train(self, trainfile, devfile):\n '''used in training'''\n\n print >> logs, \"counting word freqs from %s, unktag=%s\" % (trainfile, self.unktag)\n stime = time.time()\n\n words = defaultdict(int) \n for i, line in enumerate(open(trainfile)):\n for word in line.split():\n word = word.strip(\"()\").rsplit(\"/\", 1)[0]\n words[word] += 1\n\n if FLAGS.debug_wordfreq:\n devunk1 = set()\n devunk0 = set()\n for line in open(devfile): \n for word in line.split():\n word = word.strip(\"()\").rsplit(\"/\", 1)[0]\n if words[word] <= self.unk and words[word] > 0:\n devunk1.add(word)\n if words[word] == 0:\n devunk0.add(word)\n \n print >> logs, \"=1\", len(devunk1), \" \".join(sorted(devunk1))\n print >> logs\n print >> logs, \"=0\", len(devunk0), \" \".join(sorted(devunk0))\n\n## freqs = defaultdict(list)\n## for word, freq in words.items():\n## freqs[freq].append(word)\n\n## for freq in sorted(freqs, reverse=True):\n## print >> logs, freq, len(freqs[freq]), \" \".join(sorted(freqs[freq]))\n## print >> logs\n\n self.knowns = set()\n for word, freq in words.items():\n if freq > self.unk:\n self.knowns.add(word)\n\n print >> logs, \"%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds\" % \\\n (i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)\n## print >> logs, \" \".join(sorted(self.knowns))\n\n def add_template(self, s, freq=1):\n ## like this: \"s0w-s0t=%s|%s\" % (s0w, s0t) \n symbols = s.split(\"-\") # static part: s0w-s0t\n if s not in self.templates:\n tmp = '\"%s=%s\" %% (%s)' % (s, \\\n \"|\".join([\"%s\"] * len(symbols)), \\\n \", \".join(symbols))\n \n self.templates[s] = compile(tmp, \"2\", \"eval\")\n \n self.list_templates.append((s, tmp)) # in order\n\n self.freq_templates[s] += int(freq)\n\n def print_autoevals(self):\n\n tfilename = str(int(time.time()))\n templatefile = open(\"/tmp/%s.py\" % tfilename, \"wt\")\n \n print >> templatefile, \"#generated by model.py\"\n print >> templatefile, \"import sys; print >> sys.stderr, 'importing succeeded!'\"\n print >> templatefile, \"def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):\"\n print >> templatefile, \"%sreturn [\" % Model.indent\n \n for s, e in self.list_templates:\n print >> templatefile, \"%s%s,\" % (Model.indent * 2, e)\n \n print >> templatefile, \"%s]\" % (Model.indent * 2)\n templatefile.close()\n\n if FLAGS.autoeval:\n sys.path.append('/tmp/')\n print >> logs, \"importing auto-generated file /tmp/%s.py\" % tfilename\n # to be used in newstate\n Model.eval_module = __import__(tfilename)\n else:\n Model.eval_module = Model \n \n def print_templates(self, f=logs):\n print >> f, \">>> %d templates in total:\" % len(self.templates)\n print >> f, \"\\n\".join([\"%-20s\\t%d\" % (x, self.freq_templates[x]) \\\n for x, _ in self.list_templates])\n print >> f, \"---\"\n\n def read_templates(self, filename):\n\n ## try interpreting it as a filename, if failed, then as a string\n try:\n f = open(filename)\n print >> logs, \"reading templates from %s\" 
% filename,\n for x in f:\n if x[:3] == \"---\":\n break\n if x[:3] == \">>>\":\n continue\n try:\n s, freq = x.split()\n except:\n s, freq = x, 1\n self.add_template(s, freq) \n \n except:\n ## from argv string rather than file\n for x in filename.split():\n self.add_template(x)\n f = None\n\n print >> logs, \"%d feature templates read.\" % len(self.templates)\n\n return f\n\n def read_weights(self, filename, infertemplates=False):\n '''instances are like \"s0t-q0t=LRB-</s>=>LEFT 3.8234\"'''\n\n infile = self.read_templates(filename)\n\n infertemplates = len(self.templates) <= 1\n if infertemplates:\n print >> logs, \"will infer templates from weights...\" \n\n mytime = Mytime()\n i = 0\n if infile is not None:\n print >> logs, \"reading feature weights from %s\\t\" % filename,\n for i, line in enumerate(infile, 1):\n if i % 200000 == 0:\n print >> logs, \"%d lines read...\" % i,\n\n if line[0] == \" \":\n # TODO: separate known words line (last line)\n self.knowns = set(line.split())\n print >> logs, \"\\n%d known words read.\" % len(self.knowns)\n self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x\n break\n\n feat, weight = line.split() \n self.weights[feat] = float(weight)\n\n if infertemplates:\n self.add_template(feat.split(\"=\", 1)[0], 1) ## one occurrence\n\n print >> logs, \"\\n%d feature instances (%d lines) read in %.2lf seconds.\" % \\\n (len(self.weights), i, mytime.period())\n\n self.print_autoevals()\n\n def make_feats(self, state):\n '''returns a *list* of feature templates for state.'''\n \n fv = new_vector() #Vector()\n top = state.top()\n topnext = state.top(1)\n top3rd = state.top(2)\n qhead = state.qhead()\n qnext = state.qhead(1)\n\n ## this part is manual; their combinations are automatic\n s0 = top.head() if top is not None else (\"<s>\", \"<s>\") # N.B. 
(...)\n s1 = topnext.head() if topnext is not None else (\"<s>\", \"<s>\") \n s2 = top3rd.head() if top3rd is not None else (\"<s>\", \"<s>\") \n\n q0 = qhead if qhead is not None else (\"</s>\", \"</s>\") \n q1 = qnext if qnext is not None else (\"</s>\", \"</s>\")\n\n s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else \"NONE\"\n s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else \"NONE\"\n s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else \"NONE\"\n s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else \"NONE\"\n \n ## like this: \"s0w-s0t=%s|%s\" % (s0w, s0t) ---> returns a list here!\n return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))\n# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys\n\n def write(self, filename=\"-\", weights=None):\n\n if weights is None:\n weights = self.weights\n\n if filename == \"-\":\n outfile = sys.stdout\n filename = \"STDOUT\" # careful overriding\n else:\n outfile = open(filename, \"wt\")\n\n self.print_templates(outfile)\n\n mytime = Mytime()\n\n nonzero = 0\n print >> logs, \"sorting %d features...\" % len(weights),\n for i, f in enumerate(sorted(weights), 1):\n if i == 1: # sorting done\n print >> logs, \"done in %.2lf seconds.\" % mytime.period()\n print >> logs, \"writing features to %s...\" % filename\n \n v = weights[f]\n if math.fabs(v) > 1e-3:\n print >> outfile, \"%s\\t%.5lf\" % (f, v)\n nonzero += 1\n\n if self.unk > 0: # print known words\n print >> outfile, \" \" + \" \".join(sorted(self.knowns)) # \" \" to mark\n\n print >> logs, \"%d nonzero feature instances written in %.2lf seconds.\" % \\\n (nonzero, mytime.period()) ## nonzero != i\n\n @staticmethod\n def trim(fv):\n for f in fv:\n if math.fabs(fv[f]) < 1e-3:\n del fv[f]\n return fv\n\n @staticmethod\n def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):\n return [\"q0t=%s\" % (q0t),\n \"q0w-q0t=%s|%s\" % (q0w, q0t),\n \"q0w=%s\" % (q0w),\n \"s0t-q0t-q1t=%s|%s|%s\" % (s0t, q0t, q1t),\n \"s0t-q0t=%s|%s\" % (s0t, q0t),\n \"s0t-s1t=%s|%s\" % (s0t, s1t),\n \"s0t-s1w-s1t=%s|%s|%s\" % (s0t, s1w, s1t),\n \"s0t=%s\" % (s0t),\n \"s0w-q0t-q1t=%s|%s|%s\" % (s0w, q0t, q1t),\n \"s0w-s0t-s1t=%s|%s|%s\" % (s0w, s0t, s1t),\n \"s0w-s0t-s1w-s1t=%s|%s|%s|%s\" % (s0w, s0t, s1w, s1t),\n \"s0w-s0t-s1w=%s|%s|%s\" % (s0w, s0t, s1w),\n \"s0w-s0t=%s|%s\" % (s0w, s0t),\n \"s0w-s1w-s1t=%s|%s|%s\" % (s0w, s1w, s1t),\n \"s0w-s1w=%s|%s\" % (s0w, s1w),\n \"s0w=%s\" % (s0w),\n \"s1t-s0t-q0t=%s|%s|%s\" % (s1t, s0t, q0t),\n \"s1t-s0t-s0lct=%s|%s|%s\" % (s1t, s0t, s0lct),\n \"s1t-s0t-s0rct=%s|%s|%s\" % (s1t, s0t, s0rct),\n \"s1t-s0w-q0t=%s|%s|%s\" % (s1t, s0w, q0t),\n \"s1t-s0w-s0lct=%s|%s|%s\" % (s1t, s0w, s0lct),\n \"s1t-s1lct-s0t=%s|%s|%s\" % (s1t, s1lct, s0t),\n \"s1t-s1lct-s0w=%s|%s|%s\" % (s1t, s1lct, s0w),\n \"s1t-s1rct-s0t=%s|%s|%s\" % (s1t, s1rct, s0t),\n \"s1t-s1rct-s0w=%s|%s|%s\" % (s1t, s1rct, s0w),\n \"s1t=%s\" % (s1t),\n \"s1w-s1t=%s|%s\" % (s1w, s1t),\n \"s1w=%s\" % (s1w),\n \"s2t-s1t-s0t=%s|%s|%s\" % (s2t, s1t, s0t)]\n\n def prune(self, filenames):\n '''prune features from word/tag lines'''\n\n print >> logs, \"pruning features using %s...\" % filenames,\n \n fullset = set()\n for filename in filenames.split():\n for l in open(filename):\n for w, t in map(lambda x:x.rsplit(\"/\", 1), l.split()):\n fullset.add(w)\n fullset.add(t)\n\n print >> logs, \"collected 
%d uniq words & tags...\" % (len(fullset)),\n\n new = new_vector() # Vector()\n for f in self.weights:\n\n stuff = f.split(\"=\", 1)[1].rsplit(\"=\", 1)[0].split(\"|\") ## b/w 1st and last \"=\", but caution\n for s in stuff:\n if s not in fullset:\n break\n else:\n new[f] = self.weights[f]\n\n print >> logs, \"%d features survived (ratio: %.2f)\" % (len(new), len(new) / len(self.weights))\n self.weights = new\n\n def sparsify(self, z=1):\n '''duchi et al., 2008'''\n \n \n\nif __name__ == \"__main__\":\n\n flags.DEFINE_string(\"prune\", None, \"prune features w.r.t. FILE (word/tag format)\")\n\n try:\n argv = FLAGS(sys.argv)\n if FLAGS.weights is None:\n raise flags.FlagsError(\"must specify weights by -w ...\")\n except flags.FlagsError, e:\n print >> logs, 'Error: %s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)\n sys.exit(1)\n \n FLAGS.featstat = True\n \n model = Model(FLAGS.weights) #.model, FLAGS.weights)\n\n if FLAGS.prune:\n model.prune(FLAGS.prune)\n\n if FLAGS.outputweights:\n model.write(FLAGS.outputweights)\n\n \n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from django.apps import AppConfig


class PrimaryuserConfig(AppConfig):
    name = 'PrimaryUser'
normal
{ "blob_id": "82c10076ba73723b696e3e33280296c2a24f20b9", "index": 4187, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass PrimaryuserConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass PrimaryuserConfig(AppConfig):\n name = 'PrimaryUser'\n", "step-4": "from django.apps import AppConfig\n\n\nclass PrimaryuserConfig(AppConfig):\n name = 'PrimaryUser'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.db import models
from datetime import datetime


# Create your models here.
class Notifications(models.Model):
    username= models.CharField(max_length=20)
    phone_number= models.BigIntegerField(default= 0)
    email= models.EmailField()
    firstname= models.CharField(max_length=20)
    app_name= models.CharField(max_length=50)
    service= models.CharField(max_length=50)
    datetime= models.CharField(default= str(datetime.now()), max_length=50)
    message= models.CharField(default= 0, max_length=300)
    notify_type= models.CharField(default= 'email', max_length=20)
normal
{ "blob_id": "51ed99a68486bd52499bbc28e68ff2312e02ea1f", "index": 6604, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Notifications(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Notifications(models.Model):\n username = models.CharField(max_length=20)\n phone_number = models.BigIntegerField(default=0)\n email = models.EmailField()\n firstname = models.CharField(max_length=20)\n app_name = models.CharField(max_length=50)\n service = models.CharField(max_length=50)\n datetime = models.CharField(default=str(datetime.now()), max_length=50)\n message = models.CharField(default=0, max_length=300)\n notify_type = models.CharField(default='email', max_length=20)\n", "step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Notifications(models.Model):\n username = models.CharField(max_length=20)\n phone_number = models.BigIntegerField(default=0)\n email = models.EmailField()\n firstname = models.CharField(max_length=20)\n app_name = models.CharField(max_length=50)\n service = models.CharField(max_length=50)\n datetime = models.CharField(default=str(datetime.now()), max_length=50)\n message = models.CharField(default=0, max_length=300)\n notify_type = models.CharField(default='email', max_length=20)\n", "step-5": "from django.db import models\nfrom datetime import datetime\n\n\n# Create your models here.\nclass Notifications(models.Model):\n username= models.CharField(max_length=20)\n phone_number= models.BigIntegerField(default= 0)\n email= models.EmailField()\n firstname= models.CharField(max_length=20)\n app_name= models.CharField(max_length=50)\n service= models.CharField(max_length=50)\n datetime= models.CharField(default= str(datetime.now()), max_length=50)\n message= models.CharField(default= 0, max_length=300)\n notify_type= models.CharField(default= 'email', max_length=20)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding:utf-8 -*-
import time
from abc import ABCMeta, abstractmethod
from xlreportform.worksheet import WorkSheet

__author__ = "Andy Yang"


class Bases(metaclass=ABCMeta):
    def __init__(self):
        pass

    @abstractmethod
    def set_style(self):
        """set workshet's style, indent,border,font,and so on"""

    @abstractmethod
    def query(self):
        """query from mysql, sqlserver"""

    @abstractmethod
    def clean(self):
        """clean data"""

    @abstractmethod
    def export(self):
        """export data"""


class ReportForm(Bases, WorkSheet):
    def __init__(self, visible=False, filename=None, sheetname=None):
        WorkSheet.__init__(self, visible, filename, sheetname)

    def __new__(cls, *args, **kwargs):
        cls.query(cls)
        cls.clean(cls)
        cls.set_style(cls)
        cls.export(cls)
        return object.__new__(cls)


class DayRport(ReportForm):
    def query(self):
        print('query')

    def set_style(self):
        print('set_style')

    def export(self):
        print('export')


if __name__ == '__main__':
    d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
    time.sleep(5)
    print(d)
normal
{ "blob_id": "092c6d637fe85136b4184d05f0ac7db17a8efb3b", "index": 6087, "step-1": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n", "step-3": "<mask token>\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n", "step-4": "import time\nfrom abc import ABCMeta, abstractmethod\nfrom xlreportform.worksheet import WorkSheet\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass 
ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n", "step-5": "# -*- coding:utf-8 -*-\r\nimport time\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom xlreportform.worksheet import WorkSheet\r\n\r\n__author__ = \"Andy Yang\"\r\n\r\n\r\nclass Bases(metaclass=ABCMeta):\r\n def __init__(self):\r\n pass\r\n\r\n @abstractmethod\r\n def set_style(self):\r\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\r\n\r\n @abstractmethod\r\n def query(self):\r\n \"\"\"query from mysql, sqlserver\"\"\"\r\n\r\n @abstractmethod\r\n def clean(self):\r\n \"\"\"clean data\"\"\"\r\n\r\n @abstractmethod\r\n def export(self):\r\n \"\"\"export data\"\"\"\r\n\r\n\r\nclass ReportForm(Bases, WorkSheet):\r\n def __init__(self, visible=False, filename=None, sheetname=None):\r\n WorkSheet.__init__(self, visible, filename, sheetname)\r\n\r\n def __new__(cls, *args, **kwargs):\r\n cls.query(cls)\r\n cls.clean(cls)\r\n cls.set_style(cls)\r\n cls.export(cls)\r\n return object.__new__(cls)\r\n\r\n\r\nclass DayRport(ReportForm):\r\n def query(self):\r\n print('query')\r\n def set_style(self):\r\n print('set_style')\r\n def export(self):\r\n print('export')\r\n\r\n\r\nif __name__ == '__main__':\r\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\r\n time.sleep(5)\r\n print(d)", "step-ids": [ 13, 14, 15, 16, 17 ] }
[ 13, 14, 15, 16, 17 ]
# message 为定义的变量
message = 'Hello Python World '
print(message)
normal
{ "blob_id": "ee5e970f32b1d601f9dc3ab37a5028ce7ff8a32e", "index": 1368, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(message)\n", "step-3": "message = 'Hello Python World '\nprint(message)\n", "step-4": "# message 为定义的变量\r\nmessage = 'Hello Python World '\r\nprint(message)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import csv
from pprint import pprint as pp
with open('nodes_tags.csv', 'r') as f:
    tags = csv.DictReader(f)
    for row in tags:
        if row['key'] == 'FIXME':
            pp(row)
normal
{ "blob_id": "d0981d279f7090d5309aa564252dba731a34a66b", "index": 1424, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n", "step-3": "import csv\nfrom pprint import pprint as pp\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import markdown

from django.db import models
from django.contrib.auth.models import User

from datetime import datetime

class MovieRankings(models.Model):
    """
    各种电影排行榜.
    """
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name

class Movie(models.Model):
    """
    电影的数据库表格
    """
    movie_name = models.CharField(max_length=64, blank=True)
    # 豆瓣链接,值可以是null,也可以不填这个字段.
    douban_link = models.CharField(max_length=256, null=True, blank=True)
    # 豆瓣评分.
    douban_score = models.CharField(max_length=64, null=True, blank=True)
    # 豆瓣评分人数.
    douban_counter = models.PositiveIntegerField(default=0, blank=True)
    # Imdb链接.
    imdb_link = models.CharField(max_length=256, null=True, blank=True)
    # Imdb评分.
    imdb_score = models.CharField(max_length=64, null=True, blank=True)
    # Imdb评分人数.
    imdb_counter = models.PositiveIntegerField(default=0, blank=True)
    # 网站中的链接.
    nomovie_link = models.CharField(max_length=256, null=True, blank=True)
    # 网站中评分.
    nomovie_score = models.CharField(max_length=64, null=True, blank=True)
    # 网站中评分人数.
    nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
    # 上映国家.
    country = models.CharField(max_length=64, null=True, blank=True)
    # 上映日期.
    dateyear = models.CharField(max_length=64, null=True, blank=True)
    # 主演.
    actor = models.CharField(max_length=256, null=True, blank=True)
    # 导演.
    director = models.CharField(max_length=256, null=True, blank=True)
    # 电影类型.
    style = models.CharField(max_length=64, null=True, blank=True)
    # 电影播放地址.
    movie_address = models.CharField(max_length=256, null=True, blank=True)
    # 电影下载链接.
    download_link = models.CharField(max_length=256, null=True, blank=True)
    # 电影在本网站的播放次数.
    counter = models.PositiveIntegerField(default=0, blank=True)
    # 电影来源,
    # 0:表示豆瓣top250 1:表示imdbtop250 2:表示普通豆瓣 3:表示普通imdb
    # 4:表示在豆瓣和imdb中都存在 5表示:用户自添加
    original = models.CharField(max_length=256, null=True, blank=True)
    # 1:表示通过 0:表示未通过 2:表示审核中
    status = models.IntegerField(null=True, blank=True)
    # 图片保存地址
    image = models.CharField(max_length=256, null=True, blank=True)
    # 爬取电影入库时间
    spidertime = models.DateTimeField(auto_now_add=True, null=True)
    # 关于电影
    aboutmovie = models.CharField(max_length=256, null=True, blank=True)
    # 电影语言
    language = models.CharField(max_length=64, null=True, blank=True)
    # 电影天堂搜索地址
    dyttsearch = models.CharField(max_length=256, null=True, blank=True)
    # 电影天堂搜索电影详情页面
    dyttdetail = models.CharField(max_length=256, null=True, blank=True)
    movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)

    def __unicode__(self):
        return self.movie_name

    # def get_comments(self):

class MovieHistory(models.Model):
    # 观看的用户.
    # 用户一对多MovieHistory,可以看多个电影.
    user = models.ForeignKey(User)
    # 观看的电影.
    movie = models.ForeignKey(Movie)
    # 观看的时间.
    date = models.DateTimeField(auto_now_add=True)
    # 0表示用户观看了该电影,1表示收藏,2表示推荐.
    marked = models.IntegerField(blank=True, null=True)

    def __unicode__(self):
        return "{%s}--{%s}" % (self.user.username, self.movie.movie_name)
normal
{ "blob_id": "449ae193f8817d4ee2fe67eadf72d9c19b2c5e53", "index": 1319, "step-1": "<mask token>\n\n\nclass MovieRankings(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n", "step-2": "<mask token>\n\n\nclass MovieRankings(models.Model):\n <mask token>\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = 
models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n", "step-3": "<mask token>\n\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass 
MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n", "step-4": "from __future__ import unicode_literals\nimport markdown\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport markdown\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nfrom datetime import datetime\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n def __unicode__(self):\n return self.name\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n # 豆瓣链接,值可以是null,也可以不填这个字段.\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n # 豆瓣评分.\n douban_score = models.CharField(max_length=64, null=True, 
blank=True)\n # 豆瓣评分人数.\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n # Imdb链接.\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n # Imdb评分.\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n # Imdb评分人数.\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n # 网站中的链接.\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n # 网站中评分.\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n # 网站中评分人数.\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n # 上映国家.\n country = models.CharField(max_length=64, null=True, blank=True)\n # 上映日期.\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n # 主演.\n actor = models.CharField(max_length=256, null=True, blank=True)\n # 导演.\n director = models.CharField(max_length=256, null=True, blank=True)\n # 电影类型.\n style = models.CharField(max_length=64, null=True, blank=True)\n # 电影播放地址.\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n # 电影下载链接.\n download_link = models.CharField(max_length=256, null=True, blank=True)\n # 电影在本网站的播放次数.\n counter = models.PositiveIntegerField(default=0, blank=True)\n # 电影来源,\n # 0:表示豆瓣top250 1:表示imdbtop250 2:表示普通豆瓣 3:表示普通imdb \n # 4:表示在豆瓣和imdb中都存在 5表示:用户自添加\n original = models.CharField(max_length=256, null=True, blank=True)\n # 1:表示通过 0:表示未通过 2:表示审核中\n status = models.IntegerField(null=True, blank=True)\n # 图片保存地址\n image = models.CharField(max_length=256, null=True, blank=True)\n # 爬取电影入库时间\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n # 关于电影\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n # 电影语言\n language = models.CharField(max_length=64, null=True, blank=True)\n # 电影天堂搜索地址\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n # 电影天堂搜索电影详情页面\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n # def get_comments(self):\n\nclass MovieHistory(models.Model):\n # 观看的用户.\n # 用户一对多MovieHistory,可以看多个电影.\n user = models.ForeignKey(User)\n # 观看的电影.\n movie = models.ForeignKey(Movie)\n # 观看的时间.\n date = models.DateTimeField(auto_now_add=True)\n # 0表示用户观看了该电影,1表示收藏,2表示推荐.\n marked = models.IntegerField(blank=True, null=True)\n \n def __unicode__(self):\n return \"{%s}--{%s}\" % (self.user.username, self.movie.movie_name)\n\n\n", "step-ids": [ 8, 10, 11, 12, 13 ] }
[ 8, 10, 11, 12, 13 ]
# -*- coding: utf-8 -*-
__author__ = 'tqs'
from win32com.client import Dispatch
import win32com.client
import time
import os
import re
import win32api
'''
windows操作部分说明:
考试波及知识点:
1.删除文件及文件夹
2.复制文件及文件夹
3.移动文件及文件夹
4.文件及文件夹改名
5.文件属性
考试样例:
1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。
2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。
3、设置“螺旋藻.aaa”文件属性为“只读”。
4、在桌面上建立“绿色植物”的快捷方式。
'''
class WinOperation:
    def __init__(self):
        self.soucePath = ''
        self.destPath = ''
        self.destFilename = ''
        self.sourceFilename = ''
    def dele(self,destFilename):#删除文件及文件夹
        print('删除文件',destFilename)
        pass
    def rename(self,sourceFilename,destFilename):#文件改名
        print(sourceFilename,'文件改名为',destFilename)
        pass
    def mov(self,sourceFilename,destFilename):#移动文件
        print(sourceFilename,'移动文件为',destFilename)
        pass
    def copy(self,sourceFilename,destFilename):#复制文件
        print(sourceFilename,'移动文件为',destFilename)
        pass
    def prop(self,destFilename):#文件属性
        print('文件属性',destFilename)
        pass
    def realSourceFilename(self,soucePath,sourceFilename):
        return sourceFilename
    def realdestFilename(self,destPath,destFilename):
        return destFilename
    def judgeNew(self,OperStr):#从文本中判断新建文件或文件夹
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
        pass
    def judgeDele(self,OperStr):#从文本中判断删除文件
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        pass
    def judgeRename(self,OperStr):#从文本中判断重命名文件
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
        pass
    def judgeMov(self,OperStr):#从文本中判断移动文件
        #形如将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。这种结构的解析
        #解析为源文件,目标文件
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
        #需要再取得完整路径,需要查找
        sourceFilename=self.realSourceFilename("d:\zrexam\windows",source)
        destFilename=self.realdestFilename("d:\zrexam\windows",dest)
        self.mov(sourceFilename,destFilename)
    def judgeCopy(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
        pass
    def judgeProp(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
##        win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)
##        win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)
        pass
    def judgeOperFromList(self,OperStrList):#根据各小题选择对应的操作
        for item in OperStrList:
            pass
    def getOperStrListFromFile(self,filename):#从文件中将各小题放入列表
        pass
    def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作
        if OperStr.find("新建") !=-1:
            print("进入新建操作")
            self.judgeNew(OperStr)
            print("结束新建操作")
        if OperStr.find("删除") !=-1:
            print("进入删除操作")
            self.judgeDele(OperStr)
            print("结束删除操作")
        if OperStr.find("复制") !=-1:
            print("进入复制操作")
            self.judgeCopy(OperStr)
            print("结束复制操作")
        if OperStr.find("移动") !=-1:
            print("进入移动操作")
            self.judgeMov(OperStr)
            print("结束移动操作")
        if OperStr.find("改名") !=-1:
            print("进入改名操作")
            self.judgeRename(OperStr)
            print("结束改名操作")
        if OperStr.find("属性") !=-1:
            print("进入属性操作")
            self.judgeProp(OperStr)
            print("结束属性操作")

'''
word操作部分说明:
考试波及知识点:
1.字体
2.段落
3.查找替换
4.插入 表格,艺术字,图片
5.页边距,分栏

1. 将标题“师恩难忘”设置为黑体,居中对齐。
2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。
3.将文中所有的“田老师”替换为“田先生”。
4. 设置页边距为上下各2.5厘米(应用于整篇文档)。
5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。
考试样例:
'''
class WordOperation:
    def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)
        self.wordApp = win32com.client.Dispatch('Word.Application')
        if filename:
            self.filename = filename
        else:
            self.filename = ''
    def save(self, newfilename=None): #保存文件
        if newfilename:
            self.filename = newfilename
        else:
            pass
    def close(self): #关闭文件
        del self.wordApp
    def fontOper(self):
        pass
    def replaceOper(self,source,dest):
        pass
    def insertOper(self,style):
        pass
    def pageOper(self):
        pass
    def paragraphOper(self):
        pass
    def judgePage(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeFont(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeReplace(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeInsert(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeParagraph(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作
        if OperStr.find("标题") !=-1 or OperStr.find("黑体") !=-1 or OperStr.find("居中对齐") !=-1:
            print("进入字体操作")
            self.judgeFont(OperStr)
            print("结束字体")
        elif OperStr.find("首行缩进") !=-1 or OperStr.find("行距") !=-1:
            print("进入段落操作")
            self.judgeParagraph(OperStr)
            print("结束段落操作")
        elif OperStr.find("插入") !=-1:
            print("进入插入操作")
            self.judgeInsert(OperStr)
            print("结束插入操作")
        elif OperStr.find("页边距") !=-1:
            print("进入页边距操作")
            self.judgePage(OperStr)
            print("结束页边距操作")
        elif OperStr.find("分栏") !=-1:
            print("进入分栏操作")
            self.judgeFont(OperStr)
            print("结束分栏操作")
        elif OperStr.find("替换") !=-1:
            print("进入替换操作")
            self.judgeReplace(OperStr)
            print("结束替换操作")

'''
Excel操作部分说明:
考试波及知识点:
1.行高列宽
2.格式相关
3.公式函数
4.排序
5.插入图表
考试样例:
1.将A2所在行的行高设置为30(40像素)。
2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。
3.给A2:F8单元格区域加所有框线。
4.按“无人机社团人数”由高到低排序。
5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。
'''
class ExcelOperation:
    def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)
        self.xlApp = win32com.client.Dispatch('Excel.Application')
        if filename:
            self.filename = filename
            self.xlBook = self.xlApp.Workbooks.Open(filename)
        else:
            self.xlBook = self.xlApp.Workbooks.Add()
            self.filename = ''
    def save(self, newfilename=None): #保存文件
        if newfilename:
            self.filename = newfilename
            self.xlBook.SaveAs(newfilename)
        else:
            self.xlBook.Save()
    def close(self): #关闭文件
        self.xlBook.Close(SaveChanges=0)
        del self.xlApp
    def getCell(self, sheet, row, col): #获取单元格的数据
        "Get value of one cell"
        sht = self.xlBook.Worksheets(sheet)
        return sht.Cells(row, col).Value
    def setCell(self, sheet, row, col, value): #设置单元格的数据
        "set value of one cell"
        sht = self.xlBook.Worksheets(sheet)
        sht.Cells(row, col).Value = value
    def setCellformat(self, sheet, row, col): #设置单元格的数据
        "set value of one cell"
        sht = self.xlBook.Worksheets(sheet)
        sht.Cells(row, col).Font.Size = 15#字体大小
        sht.Cells(row, col).Font.Bold = True#是否黑体
        sht.Cells(row, col).Font.Name = "Arial"#字体类型
        sht.Cells(row, col).Interior.ColorIndex = 3#表格背景
        #sht.Range("A1").Borders.LineStyle = xlDouble
        sht.Cells(row, col).BorderAround(1,4)#表格边框
        sht.Rows(3).RowHeight = 30#行高
        sht.Cells(row, col).HorizontalAlignment = -4131 #水平居中xlCenter
        sht.Cells(row, col).VerticalAlignment = -4160 #
    def rowHeightOper(self,sheet,row,height):
        sht = self.xlBook.Worksheets(sheet)
        sht.Rows(row).RowHeight = height
    def deleteRow(self, sheet, row):
        sht = self.xlBook.Worksheets(sheet)
        sht.Rows(row).Delete()#删除行
        sht.Columns(row).Delete()#删除列
    def getRange(self, sheet, row1, col1, row2, col2): #获得一块区域的数据,返回为一个二维元组
        "return a 2d array (i.e. tuple of tuples)"
        sht = self.xlBook.Worksheets(sheet)
        return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
    def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #插入图片
        "Insert a picture in sheet"
        sht = self.xlBook.Worksheets(sheet)
        sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
    def cpSheet(self, before): #复制工作表
        "copy sheet"
        shts = self.xlBook.Worksheets
        shts(1).Copy(None,shts(1))
    def judgeRowHeight(self,OperStr):#行高操作
        print('正在完成要求',OperStr)
    def judgeColWidth(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeFormula(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeFunction(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeSort(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeChart(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeBoxLine(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作
        if OperStr.find("行高") !=-1:
            print("进入行高操作")
            self.judgeRowHeight(OperStr)
            print("结束行高操作")
        if OperStr.find("列宽") !=-1:
            print("进入列宽操作")
            self.judgeColWidth(OperStr)
            print("结束列宽操作")
        if OperStr.find("公式") !=-1:
            print("进入公式操作")
            self.judgeFormula(OperStr)
            print("结束公式操作")
        if OperStr.find("函数") !=-1:
            print("进入函数操作")
            self.judgeFunction(OperStr)
            print("结束函数操作")
        if OperStr.find("所有框线") !=-1:
            print("进入所有框线操作")
            self.judgeBoxLine(OperStr)
            print("结束所有框线操作")
        if OperStr.find("排序") !=-1:
            print("进入排序操作")
            self.judgeSort(OperStr)
            print("结束排序操作")
        if OperStr.find("图表") !=-1:
            print("进入图表操作")
            self.judgeChart(OperStr)
            print("结束图表操作")
        pass

'''
PPT操作部分说明:
1.动画效果
2.切换效果
3.超级链接
4.背景
5.插入,图片,声音,视频
考试样例:
1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。
2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。
3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。
4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。
5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。
'''
class PptOperation:
    def __init__(self):
        pass
    def AnimationOper(self):
        pass
    def SwitchOper(self):
        pass
    def InsertOper(self,style):
        pass
    def BackgroundOper(self):
        pass
    def HyperlinkOper(self):
        pass
    def judgeAnimation(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
    def judgeSwitch(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#获得源文件
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#获得目标文件
        print(dest)
    def judgeInsert(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeBackground(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeHyperlink(self,OperStr):
        print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作
        if OperStr.find("动画") !=-1:
            print("进入动画操作")
            self.judgeAnimation(OperStr)
            print("结束动画操作")
        if OperStr.find("切换") !=-1:
            print("进入切换操作")
            self.judgeSwitch(OperStr)
            print("结束切换操作")
        if OperStr.find("超级链接") !=-1:
            print("进入超级链接操作")
            self.judgeHyperlink(OperStr)
            print("结束超级链接操作")
        if OperStr.find("背景") !=-1:
            print("进入背景操作")
            self.judgeBackground(OperStr)
            print("结束背景操作")
        if OperStr.find("插入") !=-1:
            print("进入插入操作")
            self.judgeInsert(OperStr)
            print("结束插入操作")

'''
Input文字录入操作部分说明:
考试波及知识点:
com对象的调用演示:
class InputOperation:
'''
class OperationTypeJudge:
    def __init__(self):
        pass
    def getType(self,OperStr):
        if OperStr.find("替换") !=-1 or OperStr.find("首行缩进") !=-1:
            print('这是word题要求')
            print('已转word题处理')
        elif OperStr.find("公式") !=-1 or OperStr.find("函数") !=-1:
            print('这是excel题要求')
            print('已转excel题处理')
        elif OperStr.find("切换") !=-1 or OperStr.find("动画") !=-1:
            print('这是ppt题要求')
            print('已转ppt题处理')
        pass
    def getOperaPath(self):
        pass
    def getOperaFileName(self):
        pass

'''
选择题部分说明:
'''
class SelectOperation:
    def __init__(self):
        pass
    def getQusetionTxt(self,item):
        pass
    def getQusetionPic(self,item):
        pass
    def getAnswer(self,item):
        pass
    def getCorrectAnswer(self,item):
        pass

'''
判断题部分说明:
'''
class JudgeOperation:
    def __init__(self):
        pass
    def getQusetionTxt(self,item):
        pass
    def getQusetionPic(self,item):
        pass
    def getAnswer(self,item):
        pass
    def getCorrectAnswer(self,item):
        pass

if __name__ == "__main__":
    win=WinOperation()
    win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')
    win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')
    win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')
    win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')
    word=WordOperation()
    word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')
    word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')
    word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')
    word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')
    word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')
    excel=ExcelOperation(r'c:/test.xls')
    excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')
    excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')
    excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')
    excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')
    excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')
    ppt=PptOperation()
    ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')
    ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')
    ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')
    ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')
    ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')
normal
{ "blob_id": "b453006b4d4c5f17bb58110fe8197d7796ca0c6c", "index": 467, "step-1": "<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n <mask token>\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n <mask token>\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n <mask token>\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n <mask token>\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def 
judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n 
sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def 
__init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. 
tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n 
print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass WinOperation:\n <mask token>\n <mask token>\n\n def rename(self, sourceFilename, destFilename):\n print(sourceFilename, '文件改名为', destFilename)\n pass\n\n def mov(self, sourceFilename, destFilename):\n print(sourceFilename, '移动文件为', destFilename)\n pass\n <mask token>\n <mask token>\n\n def realSourceFilename(self, soucePath, sourceFilename):\n return sourceFilename\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def judgeCopy(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n pass\n <mask token>\n\n def judgeOperFromList(self, OperStrList):\n for item in OperStrList:\n pass\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('标题') != -1 or OperStr.find('黑体'\n ) != -1 or OperStr.find('居中对齐') != -1:\n print('进入字体操作')\n self.judgeFont(OperStr)\n print('结束字体')\n elif OperStr.find('首行缩进') != -1 or OperStr.find('行距') != -1:\n print('进入段落操作')\n self.judgeParagraph(OperStr)\n print('结束段落操作')\n elif OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n elif OperStr.find('页边距') != -1:\n print('进入页边距操作')\n self.judgePage(OperStr)\n print('结束页边距操作')\n elif OperStr.find('分栏') != -1:\n print('进入分栏操作')\n self.judgeFont(OperStr)\n print('结束分栏操作')\n elif OperStr.find('替换') != -1:\n print('进入替换操作')\n self.judgeReplace(OperStr)\n print('结束替换操作')\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if 
newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern 
= re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n", "step-5": "# -*- coding: utf-8 -*-\n__author__ = 'tqs'\nfrom win32com.client import Dispatch \nimport win32com.client \nimport time\nimport os\nimport re\nimport win32api\n'''\nwindows操作部分说明:\n考试波及知识点:\n1.删除文件及文件夹\n2.复制文件及文件夹\n3.移动文件及文件夹\n4.文件及文件夹改名\n5.文件属性\n考试样例:\n1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。\n2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。\n3、设置“螺旋藻.aaa”文件属性为“只读”。\n4、在桌面上建立“绿色植物”的快捷方式。\n'''\nclass WinOperation:\n def __init__(self):\n self.soucePath = ''\n self.destPath = ''\n self.destFilename = ''\n self.sourceFilename = ''\n def dele(self,destFilename):#删除文件及文件夹\n print('删除文件',destFilename)\n pass\n def rename(self,sourceFilename,destFilename):#文件改名\n print(sourceFilename,'文件改名为',destFilename)\n pass\n def mov(self,sourceFilename,destFilename):#移动文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def copy(self,sourceFilename,destFilename):#复制文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def prop(self,destFilename):#文件属性\n print('文件属性',destFilename)\n pass\n def realSourceFilename(self,soucePath,sourceFilename):\n return sourceFilename\n def realdestFilename(self,destPath,destFilename):\n return destFilename\n def judgeNew(self,OperStr):#从文本中判断新建文件或文件夹\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeDele(self,OperStr):#从文本中判断删除文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print 
(pattern.findall(OperStr))\n pass\n def judgeRename(self,OperStr):#从文本中判断重命名文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeMov(self,OperStr):#从文本中判断移动文件\n #形如将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。这种结构的解析\n #解析为源文件,目标文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n #需要再取得完整路径,需要查找\n sourceFilename=self.realSourceFilename(\"d:\\zrexam\\windows\",source)\n destFilename=self.realdestFilename(\"d:\\zrexam\\windows\",dest)\n self.mov(sourceFilename,destFilename)\n def judgeCopy(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeProp(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)\n pass\n def judgeOperFromList(self,OperStrList):#根据各小题选择对应的操作\n for item in OperStrList:\n pass\n def getOperStrListFromFile(self,filename):#从文件中将各小题放入列表 \n pass\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"新建\") !=-1:\n print(\"进入新建操作\")\n self.judgeNew(OperStr)\n print(\"结束新建操作\")\n if OperStr.find(\"删除\") !=-1:\n print(\"进入删除操作\")\n self.judgeDele(OperStr)\n print(\"结束删除操作\")\n if OperStr.find(\"复制\") !=-1:\n print(\"进入复制操作\")\n self.judgeCopy(OperStr)\n print(\"结束复制操作\")\n if OperStr.find(\"移动\") !=-1:\n print(\"进入移动操作\")\n self.judgeMov(OperStr)\n print(\"结束移动操作\")\n if OperStr.find(\"改名\") !=-1:\n print(\"进入改名操作\")\n self.judgeRename(OperStr)\n print(\"结束改名操作\")\n if OperStr.find(\"属性\") !=-1:\n print(\"进入属性操作\")\n self.judgeProp(OperStr)\n print(\"结束属性操作\")\n \n'''\nword操作部分说明:\n考试波及知识点:\n1.字体\n2.段落\n3.查找替换\n4.插入 表格,艺术字,图片\n5.页边距,分栏\n\n1. 将标题“师恩难忘”设置为黑体,居中对齐。\n2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。\n3.将文中所有的“田老师”替换为“田先生”。\n4. 设置页边距为上下各2.5厘米(应用于整篇文档)。\n5. 
在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。\n考试样例:\n'''\nclass WordOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.wordApp = win32com.client.Dispatch('Word.Application') \n if filename: \n self.filename = filename\n else:\n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename\n else:\n pass \n def close(self): #关闭文件\n del self.wordApp \n def fontOper(self): \n pass\n def replaceOper(self,source,dest):\n pass\n def insertOper(self,style):\n pass\n def pageOper(self):\n pass\n def paragraphOper(self):\n pass\n def judgePage(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFont(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeReplace(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeParagraph(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"标题\") !=-1 or OperStr.find(\"黑体\") !=-1 or OperStr.find(\"居中对齐\") !=-1:\n print(\"进入字体操作\")\n self.judgeFont(OperStr)\n print(\"结束字体\")\n elif OperStr.find(\"首行缩进\") !=-1 or OperStr.find(\"行距\") !=-1:\n print(\"进入段落操作\")\n self.judgeParagraph(OperStr) \n print(\"结束段落操作\")\n elif OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n elif OperStr.find(\"页边距\") !=-1:\n print(\"进入页边距操作\")\n self.judgePage(OperStr)\n print(\"结束页边距操作\")\n elif OperStr.find(\"分栏\") !=-1:\n print(\"进入分栏操作\")\n self.judgeFont(OperStr)\n print(\"结束分栏操作\")\n elif OperStr.find(\"替换\") !=-1:\n print(\"进入替换操作\")\n self.judgeReplace(OperStr)\n print(\"结束替换操作\")\n \n'''\nExcel操作部分说明:\n考试波及知识点:\n1.行高列宽\n2.格式相关\n3.公式函数\n4.排序\n5.插入图表\n\n考试样例:\n1.将A2所在行的行高设置为30(40像素)。\n2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。\n3.给A2:F8单元格区域加所有框线。\n4.按“无人机社团人数”由高到低排序。\n5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。\n\n'''\nclass ExcelOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.xlApp = win32com.client.Dispatch('Excel.Application') \n if filename: \n self.filename = filename \n self.xlBook = self.xlApp.Workbooks.Open(filename) \n else: \n self.xlBook = self.xlApp.Workbooks.Add() \n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename \n self.xlBook.SaveAs(newfilename) \n else: \n self.xlBook.Save() \n def close(self): #关闭文件\n self.xlBook.Close(SaveChanges=0) \n del self.xlApp \n def getCell(self, sheet, row, col): #获取单元格的数据\n \"Get value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n return sht.Cells(row, col).Value \n def setCell(self, sheet, row, col, value): #设置单元格的数据\n \"set value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Value = value\n def setCellformat(self, sheet, row, col): #设置单元格的数据\n \"set value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Font.Size = 15#字体大小\n sht.Cells(row, col).Font.Bold = True#是否黑体\n sht.Cells(row, col).Font.Name = \"Arial\"#字体类型\n sht.Cells(row, col).Interior.ColorIndex = 3#表格背景\n #sht.Range(\"A1\").Borders.LineStyle = xlDouble\n sht.Cells(row, col).BorderAround(1,4)#表格边框\n sht.Rows(3).RowHeight = 30#行高\n sht.Cells(row, col).HorizontalAlignment = -4131 #水平居中xlCenter\n sht.Cells(row, col).VerticalAlignment = -4160 #\n def rowHeightOper(self,sheet,row,height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height \n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()#删除行\n sht.Columns(row).Delete()#删除列\n def 
getRange(self, sheet, row1, col1, row2, col2): #获得一块区域的数据,返回为一个二维元组\n \"return a 2d array (i.e. tuple of tuples)\" \n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value \n def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #插入图片\n \"Insert a picture in sheet\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n def cpSheet(self, before): #复制工作表\n \"copy sheet\" \n shts = self.xlBook.Worksheets \n shts(1).Copy(None,shts(1))\n def judgeRowHeight(self,OperStr):#行高操作\n print('正在完成要求',OperStr)\n def judgeColWidth(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFormula(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFunction(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeSort(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeChart(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBoxLine(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"行高\") !=-1:\n print(\"进入行高操作\")\n self.judgeRowHeight(OperStr)\n print(\"结束行高操作\")\n if OperStr.find(\"列宽\") !=-1:\n print(\"进入列宽操作\")\n self.judgeColWidth(OperStr)\n print(\"结束列宽操作\")\n if OperStr.find(\"公式\") !=-1:\n print(\"进入公式操作\")\n self.judgeFormula(OperStr)\n print(\"结束公式操作\")\n if OperStr.find(\"函数\") !=-1:\n print(\"进入函数操作\")\n self.judgeFunction(OperStr)\n print(\"结束函数操作\")\n if OperStr.find(\"所有框线\") !=-1:\n print(\"进入所有框线操作\")\n self.judgeBoxLine(OperStr)\n print(\"结束所有框线操作\")\n if OperStr.find(\"排序\") !=-1:\n print(\"进入排序操作\")\n self.judgeSort(OperStr)\n print(\"结束排序操作\")\n if OperStr.find(\"图表\") !=-1:\n print(\"进入图表操作\")\n self.judgeChart(OperStr)\n print(\"结束图表操作\")\n pass\n \n'''\nPPT操作部分说明:\n1.动画效果\n2.切换效果\n3.超级链接\n4.背景\n5.插入,图片,声音,视频\n\n考试样例:\n1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。\n2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。\n3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。\n4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。\n5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。\n'''\n\nclass PptOperation:\n def __init__(self):\n pass\n def AnimationOper(self):\n pass\n def SwitchOper(self):\n pass\n def InsertOper(self,style):\n pass\n def BackgroundOper(self):\n pass\n def HyperlinkOper(self):\n pass\n def judgeAnimation(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeSwitch(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBackground(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeHyperlink(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n \n if OperStr.find(\"动画\") !=-1:\n print(\"进入动画操作\")\n self.judgeAnimation(OperStr)\n print(\"结束动画操作\")\n if OperStr.find(\"切换\") !=-1:\n print(\"进入切换操作\")\n self.judgeSwitch(OperStr)\n print(\"结束切换操作\")\n if OperStr.find(\"超级链接\") !=-1:\n print(\"进入超级链接操作\")\n self.judgeHyperlink(OperStr)\n print(\"结束超级链接操作\")\n if OperStr.find(\"背景\") !=-1:\n print(\"进入背景操作\")\n self.judgeBackground(OperStr)\n print(\"结束背景操作\")\n if 
OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n \n'''\nInput文字录入操作部分说明:\n考试波及知识点:\ncom对象的调用演示:\nclass InputOperation:\n'''\nclass OperationTypeJudge:\n def __init__(self):\n pass\n def getType(self,OperStr):\n if OperStr.find(\"替换\") !=-1 or OperStr.find(\"首行缩进\") !=-1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find(\"公式\") !=-1 or OperStr.find(\"函数\") !=-1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find(\"切换\") !=-1 or OperStr.find(\"动画\") !=-1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n def getOperaPath(self):\n pass\n def getOperaFileName(self):\n pass\n'''\n选择题部分说明:\n''' \nclass SelectOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass\n \n'''\n判断题部分说明:\n''' \nclass JudgeOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass \nif __name__ == \"__main__\":\n win=WinOperation()\n win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')\n win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')\n win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')\n win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')\n\n word=WordOperation()\n word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')\n word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')\n word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')\n word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')\n word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')\n\n excel=ExcelOperation(r'c:/test.xls')\n excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')\n excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')\n excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')\n excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')\n excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')\n\n ppt=PptOperation()\n ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')\n ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')\n ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')\n ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')\n ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')\n", "step-ids": [ 42, 52, 64, 71, 87 ] }
[ 42, 52, 64, 71, 87 ]
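The record above stores a set of win32com-based grading helpers (WinOperation, WordOperation, ExcelOperation, PptOperation) whose comments are in Chinese; they parse requirement strings such as 行高 (row height) or 替换 (replace) and dispatch to the matching COM call. As a hedged illustration only, not the dataset's own code, the sketch below shows the core Excel COM pattern that ExcelOperation relies on; it assumes a Windows host with Excel and pywin32 installed, and the workbook path is made up. Readers reusing the stored code may also note that its deleteRow helper deletes both Rows(row) and Columns(row).

# Hedged sketch of the Excel COM pattern used by ExcelOperation above.
# Assumes Windows + Excel + pywin32; the workbook path below is illustrative.
import win32com.client

class MiniExcel:
    def __init__(self, filename=None):
        self.app = win32com.client.Dispatch('Excel.Application')
        self.app.Visible = False
        # Open an existing workbook, or add a fresh one when no path is given.
        self.book = (self.app.Workbooks.Open(filename)
                     if filename else self.app.Workbooks.Add())

    def set_cell(self, sheet, row, col, value):
        # Worksheets/Cells are 1-indexed, exactly as in the stored helpers.
        self.book.Worksheets(sheet).Cells(row, col).Value = value

    def set_row_height(self, sheet, row, height):
        self.book.Worksheets(sheet).Rows(row).RowHeight = height

    def save_and_quit(self, path):
        self.book.SaveAs(path)
        self.book.Close(SaveChanges=0)
        self.app.Quit()

# Illustrative usage (path is an assumption):
# xl = MiniExcel()
# xl.set_cell('Sheet1', 2, 1, 'ok')
# xl.set_row_height('Sheet1', 2, 30)
# xl.save_and_quit(r'c:\graded.xlsx')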
#!/usr/bin/python3 #coding:utf-8 """ Author: Xie Song Email: [email protected] Copyright: Xie Song License: MIT """ import torch def get_sgd_optimizer(args, model): opimizer = torch.optim.SGD(model.parameters(),lr=args.lr,weight_decay=1e-4) return opimizer
normal
{ "blob_id": "5dca187cfe221f31189ca9a9309ece4b9144ac66", "index": 2812, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay\n =0.0001)\n return opimizer\n", "step-3": "<mask token>\nimport torch\n\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay\n =0.0001)\n return opimizer\n", "step-4": "#!/usr/bin/python3\n#coding:utf-8\n\n\"\"\"\n Author: Xie Song\n Email: [email protected]\n \n Copyright: Xie Song\n License: MIT\n\"\"\"\nimport torch\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(),lr=args.lr,weight_decay=1e-4)\n return opimizer", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
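The record above is a one-function PyTorch helper; its local variable is spelled "opimizer" in every stored step, which is harmless since it is only a local name. A hedged sketch of the same helper follows with the name spelled out and momentum exposed; args is assumed to be an argparse-style namespace carrying lr, and momentum=0.9 is an assumption rather than part of the stored code.

# Hedged sketch of get_sgd_optimizer above; momentum is an added assumption.
import torch

def get_sgd_optimizer(args, model, momentum=0.9):
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,              # learning rate from an argparse-style namespace
        momentum=momentum,       # not present in the stored helper
        weight_decay=1e-4,       # same L2 penalty as the stored helper
    )
    return optimizer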
from modeltranslation.translator import register, TranslationOptions from .models import * @register(PageTitleModel) class TitleTranslationOptions(TranslationOptions): fields = ( 'name', ) @register(NewsModel) class ProjectTranslationOptions(TranslationOptions): fields = ( 'name', 'text', )
normal
{ "blob_id": "9c29f04746de6847ad1bbdf08964d14e6c3766db", "index": 8700, "step-1": "<mask token>\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n", "step-2": "<mask token>\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n <mask token>\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n", "step-3": "<mask token>\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = 'name',\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n", "step-4": "from modeltranslation.translator import register, TranslationOptions\nfrom .models import *\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = 'name',\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n", "step-5": "from modeltranslation.translator import register, TranslationOptions\nfrom .models import *\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = (\n 'name',\n )\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = (\n 'name',\n 'text',\n )\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
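The record above registers PageTitleModel.name and NewsModel.name/text with django-modeltranslation. As a hedged usage sketch only: modeltranslation adds one column per language code listed in settings.LANGUAGES, so the en/ru suffixes below are assumptions chosen for illustration, not something stored in the record.

# Hedged sketch of how the registration above is typically consumed.
# 'en' and 'ru' are assumed entries of settings.LANGUAGES.
from django.utils.translation import activate
from .models import NewsModel

def show_translations(pk):
    news = NewsModel.objects.get(pk=pk)
    print(news.name_en, news.name_ru)  # per-language columns added by modeltranslation
    activate('ru')
    print(news.name)                   # resolves to the active language's value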
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Oct 7 07:51:26 2017 @author: hcorrada """ from plagiarism_lib.article_db import ArticleDB from plagiarism_lib.minhash import MinHash from plagiarism_lib.lsh import LSH import pandas as pd import numpy as np def _read_truthfile(filepath): with open(filepath, 'r') as f: truth_pairs = [tuple(sorted(line.strip().split())) for line in f] return set(truth_pairs) def _get_stats(candidate_pairs, truth_pairs): tp = len(candidate_pairs.intersection(truth_pairs)) prec = 1.0 * tp / len(candidate_pairs) rec = 1.0 * tp / len(truth_pairs) print (" returned: %d, tp=%.4f, prec=%.4f, rec=%.4f" % (len(candidate_pairs), tp, prec, rec)) return prec, rec def run(mh, truthfile, ts): truth_pairs = _read_truthfile(truthfile) prec_series = [] rec_series = [] for t in ts: print("Doing LSH with t=", t) lsh = LSH(t) lsh.do_lsh(mh) candidate_pairs = set(lsh.get_candidates()) prec, rec = _get_stats(candidate_pairs, truth_pairs) prec_series.append(prec) rec_series.append(rec) exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series}) return exp_df
normal
{ "blob_id": "18b73a06c80272aff5c0e4b10473e95bd58466f3", "index": 1197, "step-1": "<mask token>\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n prec_series = []\n rec_series = []\n for t in ts:\n print('Doing LSH with t=', t)\n lsh = LSH(t)\n lsh.do_lsh(mh)\n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs)\n prec_series.append(prec)\n rec_series.append(rec)\n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n return exp_df\n", "step-4": "<mask token>\nfrom plagiarism_lib.article_db import ArticleDB\nfrom plagiarism_lib.minhash import MinHash\nfrom plagiarism_lib.lsh import LSH\nimport pandas as pd\nimport numpy as np\n\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split())) for line in f]\n return set(truth_pairs)\n\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs))\n prec = 1.0 * tp / len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(\n candidate_pairs), tp, prec, rec))\n return prec, rec\n\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n prec_series = []\n rec_series = []\n for t in ts:\n print('Doing LSH with t=', t)\n lsh = LSH(t)\n lsh.do_lsh(mh)\n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs)\n prec_series.append(prec)\n rec_series.append(rec)\n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n return exp_df\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 7 07:51:26 2017\n\n@author: hcorrada\n\"\"\"\n\nfrom plagiarism_lib.article_db import ArticleDB\nfrom plagiarism_lib.minhash import MinHash\nfrom plagiarism_lib.lsh import LSH\n\nimport pandas as pd\nimport numpy as np\n\ndef _read_truthfile(filepath):\n with open(filepath, 'r') as f:\n truth_pairs = [tuple(sorted(line.strip().split()))\n for line in f]\n return set(truth_pairs)\n\ndef _get_stats(candidate_pairs, truth_pairs):\n tp = len(candidate_pairs.intersection(truth_pairs)) \n prec = 1.0 * tp / 
len(candidate_pairs)\n rec = 1.0 * tp / len(truth_pairs)\n print (\" returned: %d, tp=%.4f, prec=%.4f, rec=%.4f\" % (len(candidate_pairs), tp, prec, rec))\n return prec, rec\n\ndef run(mh, truthfile, ts):\n truth_pairs = _read_truthfile(truthfile)\n \n prec_series = []\n rec_series = []\n \n for t in ts:\n print(\"Doing LSH with t=\", t) \n lsh = LSH(t)\n lsh.do_lsh(mh)\n \n candidate_pairs = set(lsh.get_candidates())\n prec, rec = _get_stats(candidate_pairs, truth_pairs) \n prec_series.append(prec)\n rec_series.append(rec)\n \n exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})\n \n return exp_df", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
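The record above sweeps LSH thresholds and scores candidate pairs against a truth file. One detail worth noting when reusing _get_stats: it divides by len(candidate_pairs), so an empty candidate set raises ZeroDivisionError. A hedged, dependency-free sketch of the same precision/recall computation with that guard is shown below; it makes no assumptions about the MinHash or LSH internals.

# Hedged sketch: _get_stats' precision/recall with an empty-set guard.
def precision_recall(candidate_pairs, truth_pairs):
    tp = len(candidate_pairs & truth_pairs)
    prec = tp / len(candidate_pairs) if candidate_pairs else 0.0
    rec = tp / len(truth_pairs) if truth_pairs else 0.0
    return prec, rec

# Illustrative check with made-up pairs:
assert precision_recall({('a', 'b'), ('a', 'c')}, {('a', 'b')}) == (0.5, 1.0)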
Desafios: 1: Crie um script python que leia o nome de uma pessoa e mostre uma mensagem de boas-vindas de acordo com o valor digitado. Script: Desafio 01: 1: Crie um script python que leia o nome de uma pessoa e mostre uma mensagem de boas-vindas de acordo com o valor digitado.""" nome=input('Qual é o seu nome?') print('Olá ',nome,'! Prazer em te conhecer!') Retorno: Python 3.6.9 (default, Nov 7 2019, 10:44:02) [GCC 8.3.0] on linux Type "help", "copyright", "credits" or "license()" for more information. >>> === RESTART: /home/anderson/Área de Trabalho/scripts_python/desafio_01.py === Qual é o seu nome?Anderson Olá Anderson ! Prazer em te conhecer! >>>
normal
{ "blob_id": "80454a3935f0d42b5535440fc316af1b5598d8a1", "index": 7090, "step-1": "Desafios:\n1: Crie um script python que leia o nome de uma pessoa e mostre uma mensagem de boas-vindas de acordo com o valor digitado.\n\nScript:\nDesafio 01:\n1: Crie um script python que leia o nome de uma pessoa\ne mostre uma mensagem de boas-vindas de acordo com o valor digitado.\"\"\"\nnome=input('Qual é o seu nome?')\nprint('Olá ',nome,'! Prazer em te conhecer!')\n\nRetorno:\nPython 3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n=== RESTART: /home/anderson/Área de Trabalho/scripts_python/desafio_01.py ===\nQual é o seu nome?Anderson\nOlá Anderson ! Prazer em te conhecer!\n>>> ", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
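The record above is a beginner exercise in Portuguese: read a person's name and print a welcome message; its sample run shows the stray space that comma-separated print arguments produce ("Olá Anderson !"). A hedged variant using an f-string, which avoids that artifact, is sketched below.

# Hedged sketch of the exercise above, using an f-string for clean spacing.
nome = input('Qual é o seu nome? ')
print(f'Olá {nome}! Prazer em te conhecer!')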
#!/usr/bin/env python # script :: creating a datamodel that fits mahout from ratings.dat ratings_dat = open('../data/movielens-1m/users.dat', 'r') ratings_csv = open('../data/movielens-1m/users.txt', 'w') for line in ratings_dat: arr = line.split('::') new_line = '\t'.join(arr) ratings_csv.write(new_line) ratings_dat.close() ratings_csv.close()
normal
{ "blob_id": "2dd59681a0dcb5d3f1143385100c09c7783babf4", "index": 76, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n", "step-3": "ratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n", "step-4": "#!/usr/bin/env python\n# script :: creating a datamodel that fits mahout from ratings.dat\n\n\n\nratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\n\nfor line in ratings_dat:\n\tarr = line.split('::')\n\tnew_line = '\\t'.join(arr)\n\n\tratings_csv.write(new_line)\n\nratings_dat.close()\nratings_csv.close()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
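The record above converts a "::"-delimited MovieLens users.dat into a tab-separated file for Mahout by splitting and re-joining each line. A hedged sketch of the same conversion with context managers, so both files are closed even on error, follows; the relative paths are the record's own, and csv.writer with a tab delimiter is an equivalent substitution rather than the stored approach.

# Hedged sketch of the same "::" -> tab conversion, using context managers.
import csv

def dat_to_tsv(src='../data/movielens-1m/users.dat',
               dst='../data/movielens-1m/users.txt'):
    with open(src, 'r') as fin, open(dst, 'w', newline='') as fout:
        writer = csv.writer(fout, delimiter='\t', lineterminator='\n')
        for line in fin:
            writer.writerow(line.rstrip('\n').split('::'))

if __name__ == '__main__':
    dat_to_tsv()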
"""Support for Deebot Vaccums.""" import logging from typing import Any, Mapping, Optional import voluptuous as vol from deebot_client.commands import ( Charge, Clean, FanSpeedLevel, PlaySound, SetFanSpeed, SetRelocationState, SetWaterInfo, ) from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode from deebot_client.commands.custom import CustomCommand from deebot_client.events import ( BatteryEvent, CustomCommandEvent, ErrorEvent, FanSpeedEvent, ReportStatsEvent, RoomsEvent, StatusEvent, ) from deebot_client.events.event_bus import EventListener from deebot_client.models import Room, VacuumState from deebot_client.vacuum_bot import VacuumBot from homeassistant.components.vacuum import ( SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_MAP, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND, SUPPORT_START, SUPPORT_STATE, SUPPORT_STOP, StateVacuumEntity, StateVacuumEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers import entity_platform from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import slugify from .const import ( DOMAIN, EVENT_CLEANING_JOB, EVENT_CUSTOM_COMMAND, LAST_ERROR, REFRESH_MAP, REFRESH_STR_TO_EVENT_DTO, VACUUMSTATE_TO_STATE, ) from .entity import DeebotEntity from .hub import DeebotHub from .util import dataclass_to_dict, unsubscribe_listeners _LOGGER = logging.getLogger(__name__) SUPPORT_DEEBOT: int = ( SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_BATTERY | SUPPORT_SEND_COMMAND | SUPPORT_LOCATE | SUPPORT_MAP | SUPPORT_STATE | SUPPORT_START ) # Must be kept in sync with services.yaml SERVICE_REFRESH = "refresh" SERVICE_REFRESH_PART = "part" SERVICE_REFRESH_SCHEMA = { vol.Required(SERVICE_REFRESH_PART): vol.In( [*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP] ) } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Add entities for passed config_entry in HA.""" hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id] new_devices = [] for vacbot in hub.vacuum_bots: new_devices.append(DeebotVacuum(vacbot)) if new_devices: async_add_entities(new_devices) platform = entity_platform.async_get_current_platform() platform.async_register_entity_service( SERVICE_REFRESH, SERVICE_REFRESH_SCHEMA, "_service_refresh", ) class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore """Deebot Vacuum.""" def __init__(self, vacuum_bot: VacuumBot): """Initialize the Deebot Vacuum.""" device_info = vacuum_bot.device_info if device_info.nick is not None: name: str = device_info.nick else: # In case there is no nickname defined, use the device id name = device_info.did super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name)) self._battery: Optional[int] = None self._fan_speed: Optional[str] = None self._state: Optional[VacuumState] = None self._rooms: list[Room] = [] self._last_error: Optional[ErrorEvent] = None async def async_added_to_hass(self) -> None: """Set up the event listeners now that hass is ready.""" await super().async_added_to_hass() async def on_battery(event: BatteryEvent) -> None: self._battery = event.value self.async_write_ha_state() async def on_custom_command(event: CustomCommandEvent) -> None: self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event)) async def on_error(event: ErrorEvent) -> None: 
self._last_error = event self.async_write_ha_state() async def on_fan_speed(event: FanSpeedEvent) -> None: self._fan_speed = event.speed self.async_write_ha_state() async def on_report_stats(event: ReportStatsEvent) -> None: self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event)) async def on_rooms(event: RoomsEvent) -> None: self._rooms = event.rooms self.async_write_ha_state() async def on_status(event: StatusEvent) -> None: self._state = event.state self.async_write_ha_state() listeners: list[EventListener] = [ self._vacuum_bot.events.subscribe(BatteryEvent, on_battery), self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command), self._vacuum_bot.events.subscribe(ErrorEvent, on_error), self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed), self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats), self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms), self._vacuum_bot.events.subscribe(StatusEvent, on_status), ] self.async_on_remove(lambda: unsubscribe_listeners(listeners)) @property def supported_features(self) -> int: """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_DEEBOT @property def state(self) -> StateType: """Return the state of the vacuum cleaner.""" if self._state is not None and self.available: return VACUUMSTATE_TO_STATE[self._state] @property def battery_level(self) -> Optional[int]: """Return the battery level of the vacuum cleaner.""" return self._battery @property def fan_speed(self) -> Optional[str]: """Return the fan speed of the vacuum cleaner.""" return self._fan_speed @property def fan_speed_list(self) -> list[str]: """Get the list of available fan speed steps of the vacuum cleaner.""" return [level.display_name for level in FanSpeedLevel] @property def extra_state_attributes(self) -> Optional[Mapping[str, Any]]: """Return entity specific state attributes. Implemented by platform classes. Convention for attribute names is lowercase snake_case. 
""" attributes: dict[str, Any] = {} rooms: dict[str, Any] = {} for room in self._rooms: # convert room name to snake_case to meet the convention room_name = slugify(room.subtype) room_values = rooms.get(room_name) if room_values is None: rooms[room_name] = room.id elif isinstance(room_values, list): room_values.append(room.id) else: # Convert from int to list rooms[room_name] = [room_values, room.id] if rooms: attributes["rooms"] = rooms if self._last_error: attributes[ LAST_ERROR ] = f"{self._last_error.description} ({self._last_error.code})" return attributes async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None: """Set fan speed.""" await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed)) async def async_return_to_base(self, **kwargs: Any) -> None: """Set the vacuum cleaner to return to the dock.""" await self._vacuum_bot.execute_command(Charge()) async def async_stop(self, **kwargs: Any) -> None: """Stop the vacuum cleaner.""" await self._vacuum_bot.execute_command(Clean(CleanAction.STOP)) async def async_pause(self) -> None: """Pause the vacuum cleaner.""" await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE)) async def async_start(self) -> None: """Start the vacuum cleaner.""" await self._vacuum_bot.execute_command(Clean(CleanAction.START)) async def async_locate(self, **kwargs: Any) -> None: """Locate the vacuum cleaner.""" await self._vacuum_bot.execute_command(PlaySound()) async def async_send_command( self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any ) -> None: """Send a command to a vacuum cleaner.""" _LOGGER.debug("async_send_command %s with %s", command, params) if command in ["relocate", SetRelocationState.name]: _LOGGER.warning("DEPRECATED! Please use relocate button entity instead.") await self._vacuum_bot.execute_command(SetRelocationState()) elif command == "auto_clean": clean_type = params.get("type", "auto") if params else "auto" if clean_type == "auto": _LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.') await self.async_start() elif command in ["spot_area", "custom_area", "set_water"]: if params is None: raise RuntimeError("Params are required!") if command in "spot_area": await self._vacuum_bot.execute_command( CleanArea( mode=CleanMode.SPOT_AREA, area=str(params["rooms"]), cleanings=params.get("cleanings", 1), ) ) elif command == "custom_area": await self._vacuum_bot.execute_command( CleanArea( mode=CleanMode.CUSTOM_AREA, area=str(params["coordinates"]), cleanings=params.get("cleanings", 1), ) ) elif command == "set_water": _LOGGER.warning("DEPRECATED! Please use water select entity instead.") await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"])) else: await self._vacuum_bot.execute_command(CustomCommand(command, params)) async def _service_refresh(self, part: str) -> None: """Service to manually refresh.""" _LOGGER.debug("Manually refresh %s", part) event = REFRESH_STR_TO_EVENT_DTO.get(part, None) if event: self._vacuum_bot.events.request_refresh(event) elif part == REFRESH_MAP: self._vacuum_bot.map.refresh() else: _LOGGER.warning('Service "refresh" called with unknown part: %s', part)
normal
{ "blob_id": "1ab690b0f9c34b1886320e1dfe8b54a5ec6cd4d1", "index": 8712, "step-1": "<mask token>\n\n\nclass DeebotVacuum(DeebotEntity, StateVacuumEntity):\n <mask token>\n\n def __init__(self, vacuum_bot: VacuumBot):\n \"\"\"Initialize the Deebot Vacuum.\"\"\"\n device_info = vacuum_bot.device_info\n if device_info.nick is not None:\n name: str = device_info.nick\n else:\n name = device_info.did\n super().__init__(vacuum_bot, StateVacuumEntityDescription(key='',\n name=name))\n self._battery: Optional[int] = None\n self._fan_speed: Optional[str] = None\n self._state: Optional[VacuumState] = None\n self._rooms: list[Room] = []\n self._last_error: Optional[ErrorEvent] = None\n\n async def async_added_to_hass(self) ->None:\n \"\"\"Set up the event listeners now that hass is ready.\"\"\"\n await super().async_added_to_hass()\n\n async def on_battery(event: BatteryEvent) ->None:\n self._battery = event.value\n self.async_write_ha_state()\n\n async def on_custom_command(event: CustomCommandEvent) ->None:\n self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))\n\n async def on_error(event: ErrorEvent) ->None:\n self._last_error = event\n self.async_write_ha_state()\n\n async def on_fan_speed(event: FanSpeedEvent) ->None:\n self._fan_speed = event.speed\n self.async_write_ha_state()\n\n async def on_report_stats(event: ReportStatsEvent) ->None:\n self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))\n\n async def on_rooms(event: RoomsEvent) ->None:\n self._rooms = event.rooms\n self.async_write_ha_state()\n\n async def on_status(event: StatusEvent) ->None:\n self._state = event.state\n self.async_write_ha_state()\n listeners: list[EventListener] = [self._vacuum_bot.events.subscribe\n (BatteryEvent, on_battery), self._vacuum_bot.events.subscribe(\n CustomCommandEvent, on_custom_command), self._vacuum_bot.events\n .subscribe(ErrorEvent, on_error), self._vacuum_bot.events.\n subscribe(FanSpeedEvent, on_fan_speed), self._vacuum_bot.events\n .subscribe(ReportStatsEvent, on_report_stats), self._vacuum_bot\n .events.subscribe(RoomsEvent, on_rooms), self._vacuum_bot.\n events.subscribe(StatusEvent, on_status)]\n self.async_on_remove(lambda : unsubscribe_listeners(listeners))\n <mask token>\n <mask token>\n\n @property\n def battery_level(self) ->Optional[int]:\n \"\"\"Return the battery level of the vacuum cleaner.\"\"\"\n return self._battery\n <mask token>\n\n @property\n def fan_speed_list(self) ->list[str]:\n \"\"\"Get the list of available fan speed steps of the vacuum cleaner.\"\"\"\n return [level.display_name for level in FanSpeedLevel]\n\n @property\n def extra_state_attributes(self) ->Optional[Mapping[str, Any]]:\n \"\"\"Return entity specific state attributes.\n\n Implemented by platform classes. 
Convention for attribute names\n is lowercase snake_case.\n \"\"\"\n attributes: dict[str, Any] = {}\n rooms: dict[str, Any] = {}\n for room in self._rooms:\n room_name = slugify(room.subtype)\n room_values = rooms.get(room_name)\n if room_values is None:\n rooms[room_name] = room.id\n elif isinstance(room_values, list):\n room_values.append(room.id)\n else:\n rooms[room_name] = [room_values, room.id]\n if rooms:\n attributes['rooms'] = rooms\n if self._last_error:\n attributes[LAST_ERROR\n ] = f'{self._last_error.description} ({self._last_error.code})'\n return attributes\n\n async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) ->None:\n \"\"\"Set fan speed.\"\"\"\n await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))\n\n async def async_return_to_base(self, **kwargs: Any) ->None:\n \"\"\"Set the vacuum cleaner to return to the dock.\"\"\"\n await self._vacuum_bot.execute_command(Charge())\n\n async def async_stop(self, **kwargs: Any) ->None:\n \"\"\"Stop the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))\n\n async def async_pause(self) ->None:\n \"\"\"Pause the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))\n\n async def async_start(self) ->None:\n \"\"\"Start the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.START))\n\n async def async_locate(self, **kwargs: Any) ->None:\n \"\"\"Locate the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(PlaySound())\n\n async def async_send_command(self, command: str, params: Optional[dict[\n str, Any]]=None, **kwargs: Any) ->None:\n \"\"\"Send a command to a vacuum cleaner.\"\"\"\n _LOGGER.debug('async_send_command %s with %s', command, params)\n if command in ['relocate', SetRelocationState.name]:\n _LOGGER.warning(\n 'DEPRECATED! Please use relocate button entity instead.')\n await self._vacuum_bot.execute_command(SetRelocationState())\n elif command == 'auto_clean':\n clean_type = params.get('type', 'auto') if params else 'auto'\n if clean_type == 'auto':\n _LOGGER.warning(\n 'DEPRECATED! Please use \"vacuum.start\" instead.')\n await self.async_start()\n elif command in ['spot_area', 'custom_area', 'set_water']:\n if params is None:\n raise RuntimeError('Params are required!')\n if command in 'spot_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.SPOT_AREA, area=str(params['rooms']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'custom_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.CUSTOM_AREA, area=str(params['coordinates']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'set_water':\n _LOGGER.warning(\n 'DEPRECATED! 
Please use water select entity instead.')\n await self._vacuum_bot.execute_command(SetWaterInfo(params[\n 'amount']))\n else:\n await self._vacuum_bot.execute_command(CustomCommand(command,\n params))\n\n async def _service_refresh(self, part: str) ->None:\n \"\"\"Service to manually refresh.\"\"\"\n _LOGGER.debug('Manually refresh %s', part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s',\n part)\n", "step-2": "<mask token>\n\n\nclass DeebotVacuum(DeebotEntity, StateVacuumEntity):\n <mask token>\n\n def __init__(self, vacuum_bot: VacuumBot):\n \"\"\"Initialize the Deebot Vacuum.\"\"\"\n device_info = vacuum_bot.device_info\n if device_info.nick is not None:\n name: str = device_info.nick\n else:\n name = device_info.did\n super().__init__(vacuum_bot, StateVacuumEntityDescription(key='',\n name=name))\n self._battery: Optional[int] = None\n self._fan_speed: Optional[str] = None\n self._state: Optional[VacuumState] = None\n self._rooms: list[Room] = []\n self._last_error: Optional[ErrorEvent] = None\n\n async def async_added_to_hass(self) ->None:\n \"\"\"Set up the event listeners now that hass is ready.\"\"\"\n await super().async_added_to_hass()\n\n async def on_battery(event: BatteryEvent) ->None:\n self._battery = event.value\n self.async_write_ha_state()\n\n async def on_custom_command(event: CustomCommandEvent) ->None:\n self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))\n\n async def on_error(event: ErrorEvent) ->None:\n self._last_error = event\n self.async_write_ha_state()\n\n async def on_fan_speed(event: FanSpeedEvent) ->None:\n self._fan_speed = event.speed\n self.async_write_ha_state()\n\n async def on_report_stats(event: ReportStatsEvent) ->None:\n self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))\n\n async def on_rooms(event: RoomsEvent) ->None:\n self._rooms = event.rooms\n self.async_write_ha_state()\n\n async def on_status(event: StatusEvent) ->None:\n self._state = event.state\n self.async_write_ha_state()\n listeners: list[EventListener] = [self._vacuum_bot.events.subscribe\n (BatteryEvent, on_battery), self._vacuum_bot.events.subscribe(\n CustomCommandEvent, on_custom_command), self._vacuum_bot.events\n .subscribe(ErrorEvent, on_error), self._vacuum_bot.events.\n subscribe(FanSpeedEvent, on_fan_speed), self._vacuum_bot.events\n .subscribe(ReportStatsEvent, on_report_stats), self._vacuum_bot\n .events.subscribe(RoomsEvent, on_rooms), self._vacuum_bot.\n events.subscribe(StatusEvent, on_status)]\n self.async_on_remove(lambda : unsubscribe_listeners(listeners))\n\n @property\n def supported_features(self) ->int:\n \"\"\"Flag vacuum cleaner robot features that are supported.\"\"\"\n return SUPPORT_DEEBOT\n <mask token>\n\n @property\n def battery_level(self) ->Optional[int]:\n \"\"\"Return the battery level of the vacuum cleaner.\"\"\"\n return self._battery\n <mask token>\n\n @property\n def fan_speed_list(self) ->list[str]:\n \"\"\"Get the list of available fan speed steps of the vacuum cleaner.\"\"\"\n return [level.display_name for level in FanSpeedLevel]\n\n @property\n def extra_state_attributes(self) ->Optional[Mapping[str, Any]]:\n \"\"\"Return entity specific state attributes.\n\n Implemented by platform classes. 
Convention for attribute names\n is lowercase snake_case.\n \"\"\"\n attributes: dict[str, Any] = {}\n rooms: dict[str, Any] = {}\n for room in self._rooms:\n room_name = slugify(room.subtype)\n room_values = rooms.get(room_name)\n if room_values is None:\n rooms[room_name] = room.id\n elif isinstance(room_values, list):\n room_values.append(room.id)\n else:\n rooms[room_name] = [room_values, room.id]\n if rooms:\n attributes['rooms'] = rooms\n if self._last_error:\n attributes[LAST_ERROR\n ] = f'{self._last_error.description} ({self._last_error.code})'\n return attributes\n\n async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) ->None:\n \"\"\"Set fan speed.\"\"\"\n await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))\n\n async def async_return_to_base(self, **kwargs: Any) ->None:\n \"\"\"Set the vacuum cleaner to return to the dock.\"\"\"\n await self._vacuum_bot.execute_command(Charge())\n\n async def async_stop(self, **kwargs: Any) ->None:\n \"\"\"Stop the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))\n\n async def async_pause(self) ->None:\n \"\"\"Pause the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))\n\n async def async_start(self) ->None:\n \"\"\"Start the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.START))\n\n async def async_locate(self, **kwargs: Any) ->None:\n \"\"\"Locate the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(PlaySound())\n\n async def async_send_command(self, command: str, params: Optional[dict[\n str, Any]]=None, **kwargs: Any) ->None:\n \"\"\"Send a command to a vacuum cleaner.\"\"\"\n _LOGGER.debug('async_send_command %s with %s', command, params)\n if command in ['relocate', SetRelocationState.name]:\n _LOGGER.warning(\n 'DEPRECATED! Please use relocate button entity instead.')\n await self._vacuum_bot.execute_command(SetRelocationState())\n elif command == 'auto_clean':\n clean_type = params.get('type', 'auto') if params else 'auto'\n if clean_type == 'auto':\n _LOGGER.warning(\n 'DEPRECATED! Please use \"vacuum.start\" instead.')\n await self.async_start()\n elif command in ['spot_area', 'custom_area', 'set_water']:\n if params is None:\n raise RuntimeError('Params are required!')\n if command in 'spot_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.SPOT_AREA, area=str(params['rooms']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'custom_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.CUSTOM_AREA, area=str(params['coordinates']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'set_water':\n _LOGGER.warning(\n 'DEPRECATED! 
Please use water select entity instead.')\n await self._vacuum_bot.execute_command(SetWaterInfo(params[\n 'amount']))\n else:\n await self._vacuum_bot.execute_command(CustomCommand(command,\n params))\n\n async def _service_refresh(self, part: str) ->None:\n \"\"\"Service to manually refresh.\"\"\"\n _LOGGER.debug('Manually refresh %s', part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s',\n part)\n", "step-3": "<mask token>\n\n\nclass DeebotVacuum(DeebotEntity, StateVacuumEntity):\n \"\"\"Deebot Vacuum.\"\"\"\n\n def __init__(self, vacuum_bot: VacuumBot):\n \"\"\"Initialize the Deebot Vacuum.\"\"\"\n device_info = vacuum_bot.device_info\n if device_info.nick is not None:\n name: str = device_info.nick\n else:\n name = device_info.did\n super().__init__(vacuum_bot, StateVacuumEntityDescription(key='',\n name=name))\n self._battery: Optional[int] = None\n self._fan_speed: Optional[str] = None\n self._state: Optional[VacuumState] = None\n self._rooms: list[Room] = []\n self._last_error: Optional[ErrorEvent] = None\n\n async def async_added_to_hass(self) ->None:\n \"\"\"Set up the event listeners now that hass is ready.\"\"\"\n await super().async_added_to_hass()\n\n async def on_battery(event: BatteryEvent) ->None:\n self._battery = event.value\n self.async_write_ha_state()\n\n async def on_custom_command(event: CustomCommandEvent) ->None:\n self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))\n\n async def on_error(event: ErrorEvent) ->None:\n self._last_error = event\n self.async_write_ha_state()\n\n async def on_fan_speed(event: FanSpeedEvent) ->None:\n self._fan_speed = event.speed\n self.async_write_ha_state()\n\n async def on_report_stats(event: ReportStatsEvent) ->None:\n self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))\n\n async def on_rooms(event: RoomsEvent) ->None:\n self._rooms = event.rooms\n self.async_write_ha_state()\n\n async def on_status(event: StatusEvent) ->None:\n self._state = event.state\n self.async_write_ha_state()\n listeners: list[EventListener] = [self._vacuum_bot.events.subscribe\n (BatteryEvent, on_battery), self._vacuum_bot.events.subscribe(\n CustomCommandEvent, on_custom_command), self._vacuum_bot.events\n .subscribe(ErrorEvent, on_error), self._vacuum_bot.events.\n subscribe(FanSpeedEvent, on_fan_speed), self._vacuum_bot.events\n .subscribe(ReportStatsEvent, on_report_stats), self._vacuum_bot\n .events.subscribe(RoomsEvent, on_rooms), self._vacuum_bot.\n events.subscribe(StatusEvent, on_status)]\n self.async_on_remove(lambda : unsubscribe_listeners(listeners))\n\n @property\n def supported_features(self) ->int:\n \"\"\"Flag vacuum cleaner robot features that are supported.\"\"\"\n return SUPPORT_DEEBOT\n\n @property\n def state(self) ->StateType:\n \"\"\"Return the state of the vacuum cleaner.\"\"\"\n if self._state is not None and self.available:\n return VACUUMSTATE_TO_STATE[self._state]\n\n @property\n def battery_level(self) ->Optional[int]:\n \"\"\"Return the battery level of the vacuum cleaner.\"\"\"\n return self._battery\n\n @property\n def fan_speed(self) ->Optional[str]:\n \"\"\"Return the fan speed of the vacuum cleaner.\"\"\"\n return self._fan_speed\n\n @property\n def fan_speed_list(self) ->list[str]:\n \"\"\"Get the list of available fan speed steps of the vacuum cleaner.\"\"\"\n return [level.display_name for 
level in FanSpeedLevel]\n\n @property\n def extra_state_attributes(self) ->Optional[Mapping[str, Any]]:\n \"\"\"Return entity specific state attributes.\n\n Implemented by platform classes. Convention for attribute names\n is lowercase snake_case.\n \"\"\"\n attributes: dict[str, Any] = {}\n rooms: dict[str, Any] = {}\n for room in self._rooms:\n room_name = slugify(room.subtype)\n room_values = rooms.get(room_name)\n if room_values is None:\n rooms[room_name] = room.id\n elif isinstance(room_values, list):\n room_values.append(room.id)\n else:\n rooms[room_name] = [room_values, room.id]\n if rooms:\n attributes['rooms'] = rooms\n if self._last_error:\n attributes[LAST_ERROR\n ] = f'{self._last_error.description} ({self._last_error.code})'\n return attributes\n\n async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) ->None:\n \"\"\"Set fan speed.\"\"\"\n await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))\n\n async def async_return_to_base(self, **kwargs: Any) ->None:\n \"\"\"Set the vacuum cleaner to return to the dock.\"\"\"\n await self._vacuum_bot.execute_command(Charge())\n\n async def async_stop(self, **kwargs: Any) ->None:\n \"\"\"Stop the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))\n\n async def async_pause(self) ->None:\n \"\"\"Pause the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))\n\n async def async_start(self) ->None:\n \"\"\"Start the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.START))\n\n async def async_locate(self, **kwargs: Any) ->None:\n \"\"\"Locate the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(PlaySound())\n\n async def async_send_command(self, command: str, params: Optional[dict[\n str, Any]]=None, **kwargs: Any) ->None:\n \"\"\"Send a command to a vacuum cleaner.\"\"\"\n _LOGGER.debug('async_send_command %s with %s', command, params)\n if command in ['relocate', SetRelocationState.name]:\n _LOGGER.warning(\n 'DEPRECATED! Please use relocate button entity instead.')\n await self._vacuum_bot.execute_command(SetRelocationState())\n elif command == 'auto_clean':\n clean_type = params.get('type', 'auto') if params else 'auto'\n if clean_type == 'auto':\n _LOGGER.warning(\n 'DEPRECATED! Please use \"vacuum.start\" instead.')\n await self.async_start()\n elif command in ['spot_area', 'custom_area', 'set_water']:\n if params is None:\n raise RuntimeError('Params are required!')\n if command in 'spot_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.SPOT_AREA, area=str(params['rooms']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'custom_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.CUSTOM_AREA, area=str(params['coordinates']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'set_water':\n _LOGGER.warning(\n 'DEPRECATED! 
Please use water select entity instead.')\n await self._vacuum_bot.execute_command(SetWaterInfo(params[\n 'amount']))\n else:\n await self._vacuum_bot.execute_command(CustomCommand(command,\n params))\n\n async def _service_refresh(self, part: str) ->None:\n \"\"\"Service to manually refresh.\"\"\"\n _LOGGER.debug('Manually refresh %s', part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s',\n part)\n", "step-4": "<mask token>\nSUPPORT_DEEBOT: int = (SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_RETURN_HOME |\n SUPPORT_FAN_SPEED | SUPPORT_BATTERY | SUPPORT_SEND_COMMAND |\n SUPPORT_LOCATE | SUPPORT_MAP | SUPPORT_STATE | SUPPORT_START)\n<mask token>\n\n\nasync def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback) ->None:\n \"\"\"Add entities for passed config_entry in HA.\"\"\"\n hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]\n new_devices = []\n for vacbot in hub.vacuum_bots:\n new_devices.append(DeebotVacuum(vacbot))\n if new_devices:\n async_add_entities(new_devices)\n platform = entity_platform.async_get_current_platform()\n platform.async_register_entity_service(SERVICE_REFRESH,\n SERVICE_REFRESH_SCHEMA, '_service_refresh')\n\n\nclass DeebotVacuum(DeebotEntity, StateVacuumEntity):\n \"\"\"Deebot Vacuum.\"\"\"\n\n def __init__(self, vacuum_bot: VacuumBot):\n \"\"\"Initialize the Deebot Vacuum.\"\"\"\n device_info = vacuum_bot.device_info\n if device_info.nick is not None:\n name: str = device_info.nick\n else:\n name = device_info.did\n super().__init__(vacuum_bot, StateVacuumEntityDescription(key='',\n name=name))\n self._battery: Optional[int] = None\n self._fan_speed: Optional[str] = None\n self._state: Optional[VacuumState] = None\n self._rooms: list[Room] = []\n self._last_error: Optional[ErrorEvent] = None\n\n async def async_added_to_hass(self) ->None:\n \"\"\"Set up the event listeners now that hass is ready.\"\"\"\n await super().async_added_to_hass()\n\n async def on_battery(event: BatteryEvent) ->None:\n self._battery = event.value\n self.async_write_ha_state()\n\n async def on_custom_command(event: CustomCommandEvent) ->None:\n self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))\n\n async def on_error(event: ErrorEvent) ->None:\n self._last_error = event\n self.async_write_ha_state()\n\n async def on_fan_speed(event: FanSpeedEvent) ->None:\n self._fan_speed = event.speed\n self.async_write_ha_state()\n\n async def on_report_stats(event: ReportStatsEvent) ->None:\n self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))\n\n async def on_rooms(event: RoomsEvent) ->None:\n self._rooms = event.rooms\n self.async_write_ha_state()\n\n async def on_status(event: StatusEvent) ->None:\n self._state = event.state\n self.async_write_ha_state()\n listeners: list[EventListener] = [self._vacuum_bot.events.subscribe\n (BatteryEvent, on_battery), self._vacuum_bot.events.subscribe(\n CustomCommandEvent, on_custom_command), self._vacuum_bot.events\n .subscribe(ErrorEvent, on_error), self._vacuum_bot.events.\n subscribe(FanSpeedEvent, on_fan_speed), self._vacuum_bot.events\n .subscribe(ReportStatsEvent, on_report_stats), self._vacuum_bot\n .events.subscribe(RoomsEvent, on_rooms), self._vacuum_bot.\n events.subscribe(StatusEvent, on_status)]\n self.async_on_remove(lambda : unsubscribe_listeners(listeners))\n\n 
@property\n def supported_features(self) ->int:\n \"\"\"Flag vacuum cleaner robot features that are supported.\"\"\"\n return SUPPORT_DEEBOT\n\n @property\n def state(self) ->StateType:\n \"\"\"Return the state of the vacuum cleaner.\"\"\"\n if self._state is not None and self.available:\n return VACUUMSTATE_TO_STATE[self._state]\n\n @property\n def battery_level(self) ->Optional[int]:\n \"\"\"Return the battery level of the vacuum cleaner.\"\"\"\n return self._battery\n\n @property\n def fan_speed(self) ->Optional[str]:\n \"\"\"Return the fan speed of the vacuum cleaner.\"\"\"\n return self._fan_speed\n\n @property\n def fan_speed_list(self) ->list[str]:\n \"\"\"Get the list of available fan speed steps of the vacuum cleaner.\"\"\"\n return [level.display_name for level in FanSpeedLevel]\n\n @property\n def extra_state_attributes(self) ->Optional[Mapping[str, Any]]:\n \"\"\"Return entity specific state attributes.\n\n Implemented by platform classes. Convention for attribute names\n is lowercase snake_case.\n \"\"\"\n attributes: dict[str, Any] = {}\n rooms: dict[str, Any] = {}\n for room in self._rooms:\n room_name = slugify(room.subtype)\n room_values = rooms.get(room_name)\n if room_values is None:\n rooms[room_name] = room.id\n elif isinstance(room_values, list):\n room_values.append(room.id)\n else:\n rooms[room_name] = [room_values, room.id]\n if rooms:\n attributes['rooms'] = rooms\n if self._last_error:\n attributes[LAST_ERROR\n ] = f'{self._last_error.description} ({self._last_error.code})'\n return attributes\n\n async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) ->None:\n \"\"\"Set fan speed.\"\"\"\n await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))\n\n async def async_return_to_base(self, **kwargs: Any) ->None:\n \"\"\"Set the vacuum cleaner to return to the dock.\"\"\"\n await self._vacuum_bot.execute_command(Charge())\n\n async def async_stop(self, **kwargs: Any) ->None:\n \"\"\"Stop the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))\n\n async def async_pause(self) ->None:\n \"\"\"Pause the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))\n\n async def async_start(self) ->None:\n \"\"\"Start the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.START))\n\n async def async_locate(self, **kwargs: Any) ->None:\n \"\"\"Locate the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(PlaySound())\n\n async def async_send_command(self, command: str, params: Optional[dict[\n str, Any]]=None, **kwargs: Any) ->None:\n \"\"\"Send a command to a vacuum cleaner.\"\"\"\n _LOGGER.debug('async_send_command %s with %s', command, params)\n if command in ['relocate', SetRelocationState.name]:\n _LOGGER.warning(\n 'DEPRECATED! Please use relocate button entity instead.')\n await self._vacuum_bot.execute_command(SetRelocationState())\n elif command == 'auto_clean':\n clean_type = params.get('type', 'auto') if params else 'auto'\n if clean_type == 'auto':\n _LOGGER.warning(\n 'DEPRECATED! 
Please use \"vacuum.start\" instead.')\n await self.async_start()\n elif command in ['spot_area', 'custom_area', 'set_water']:\n if params is None:\n raise RuntimeError('Params are required!')\n if command in 'spot_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.SPOT_AREA, area=str(params['rooms']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'custom_area':\n await self._vacuum_bot.execute_command(CleanArea(mode=\n CleanMode.CUSTOM_AREA, area=str(params['coordinates']),\n cleanings=params.get('cleanings', 1)))\n elif command == 'set_water':\n _LOGGER.warning(\n 'DEPRECATED! Please use water select entity instead.')\n await self._vacuum_bot.execute_command(SetWaterInfo(params[\n 'amount']))\n else:\n await self._vacuum_bot.execute_command(CustomCommand(command,\n params))\n\n async def _service_refresh(self, part: str) ->None:\n \"\"\"Service to manually refresh.\"\"\"\n _LOGGER.debug('Manually refresh %s', part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s',\n part)\n", "step-5": "\"\"\"Support for Deebot Vaccums.\"\"\"\nimport logging\nfrom typing import Any, Mapping, Optional\n\nimport voluptuous as vol\nfrom deebot_client.commands import (\n Charge,\n Clean,\n FanSpeedLevel,\n PlaySound,\n SetFanSpeed,\n SetRelocationState,\n SetWaterInfo,\n)\nfrom deebot_client.commands.clean import CleanAction, CleanArea, CleanMode\nfrom deebot_client.commands.custom import CustomCommand\nfrom deebot_client.events import (\n BatteryEvent,\n CustomCommandEvent,\n ErrorEvent,\n FanSpeedEvent,\n ReportStatsEvent,\n RoomsEvent,\n StatusEvent,\n)\nfrom deebot_client.events.event_bus import EventListener\nfrom deebot_client.models import Room, VacuumState\nfrom deebot_client.vacuum_bot import VacuumBot\nfrom homeassistant.components.vacuum import (\n SUPPORT_BATTERY,\n SUPPORT_FAN_SPEED,\n SUPPORT_LOCATE,\n SUPPORT_MAP,\n SUPPORT_PAUSE,\n SUPPORT_RETURN_HOME,\n SUPPORT_SEND_COMMAND,\n SUPPORT_START,\n SUPPORT_STATE,\n SUPPORT_STOP,\n StateVacuumEntity,\n StateVacuumEntityDescription,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import entity_platform\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import StateType\nfrom homeassistant.util import slugify\n\nfrom .const import (\n DOMAIN,\n EVENT_CLEANING_JOB,\n EVENT_CUSTOM_COMMAND,\n LAST_ERROR,\n REFRESH_MAP,\n REFRESH_STR_TO_EVENT_DTO,\n VACUUMSTATE_TO_STATE,\n)\nfrom .entity import DeebotEntity\nfrom .hub import DeebotHub\nfrom .util import dataclass_to_dict, unsubscribe_listeners\n\n_LOGGER = logging.getLogger(__name__)\n\nSUPPORT_DEEBOT: int = (\n SUPPORT_PAUSE\n | SUPPORT_STOP\n | SUPPORT_RETURN_HOME\n | SUPPORT_FAN_SPEED\n | SUPPORT_BATTERY\n | SUPPORT_SEND_COMMAND\n | SUPPORT_LOCATE\n | SUPPORT_MAP\n | SUPPORT_STATE\n | SUPPORT_START\n)\n\n# Must be kept in sync with services.yaml\nSERVICE_REFRESH = \"refresh\"\nSERVICE_REFRESH_PART = \"part\"\nSERVICE_REFRESH_SCHEMA = {\n vol.Required(SERVICE_REFRESH_PART): vol.In(\n [*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]\n )\n}\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Add entities for passed config_entry in 
HA.\"\"\"\n hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]\n\n new_devices = []\n for vacbot in hub.vacuum_bots:\n new_devices.append(DeebotVacuum(vacbot))\n\n if new_devices:\n async_add_entities(new_devices)\n\n platform = entity_platform.async_get_current_platform()\n\n platform.async_register_entity_service(\n SERVICE_REFRESH,\n SERVICE_REFRESH_SCHEMA,\n \"_service_refresh\",\n )\n\n\nclass DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore\n \"\"\"Deebot Vacuum.\"\"\"\n\n def __init__(self, vacuum_bot: VacuumBot):\n \"\"\"Initialize the Deebot Vacuum.\"\"\"\n device_info = vacuum_bot.device_info\n if device_info.nick is not None:\n name: str = device_info.nick\n else:\n # In case there is no nickname defined, use the device id\n name = device_info.did\n\n super().__init__(vacuum_bot, StateVacuumEntityDescription(key=\"\", name=name))\n\n self._battery: Optional[int] = None\n self._fan_speed: Optional[str] = None\n self._state: Optional[VacuumState] = None\n self._rooms: list[Room] = []\n self._last_error: Optional[ErrorEvent] = None\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Set up the event listeners now that hass is ready.\"\"\"\n await super().async_added_to_hass()\n\n async def on_battery(event: BatteryEvent) -> None:\n self._battery = event.value\n self.async_write_ha_state()\n\n async def on_custom_command(event: CustomCommandEvent) -> None:\n self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))\n\n async def on_error(event: ErrorEvent) -> None:\n self._last_error = event\n self.async_write_ha_state()\n\n async def on_fan_speed(event: FanSpeedEvent) -> None:\n self._fan_speed = event.speed\n self.async_write_ha_state()\n\n async def on_report_stats(event: ReportStatsEvent) -> None:\n self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))\n\n async def on_rooms(event: RoomsEvent) -> None:\n self._rooms = event.rooms\n self.async_write_ha_state()\n\n async def on_status(event: StatusEvent) -> None:\n self._state = event.state\n self.async_write_ha_state()\n\n listeners: list[EventListener] = [\n self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),\n self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),\n self._vacuum_bot.events.subscribe(ErrorEvent, on_error),\n self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),\n self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),\n self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),\n self._vacuum_bot.events.subscribe(StatusEvent, on_status),\n ]\n self.async_on_remove(lambda: unsubscribe_listeners(listeners))\n\n @property\n def supported_features(self) -> int:\n \"\"\"Flag vacuum cleaner robot features that are supported.\"\"\"\n return SUPPORT_DEEBOT\n\n @property\n def state(self) -> StateType:\n \"\"\"Return the state of the vacuum cleaner.\"\"\"\n if self._state is not None and self.available:\n return VACUUMSTATE_TO_STATE[self._state]\n\n @property\n def battery_level(self) -> Optional[int]:\n \"\"\"Return the battery level of the vacuum cleaner.\"\"\"\n return self._battery\n\n @property\n def fan_speed(self) -> Optional[str]:\n \"\"\"Return the fan speed of the vacuum cleaner.\"\"\"\n return self._fan_speed\n\n @property\n def fan_speed_list(self) -> list[str]:\n \"\"\"Get the list of available fan speed steps of the vacuum cleaner.\"\"\"\n return [level.display_name for level in FanSpeedLevel]\n\n @property\n def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:\n \"\"\"Return entity specific state 
attributes.\n\n Implemented by platform classes. Convention for attribute names\n is lowercase snake_case.\n \"\"\"\n attributes: dict[str, Any] = {}\n rooms: dict[str, Any] = {}\n for room in self._rooms:\n # convert room name to snake_case to meet the convention\n room_name = slugify(room.subtype)\n room_values = rooms.get(room_name)\n if room_values is None:\n rooms[room_name] = room.id\n elif isinstance(room_values, list):\n room_values.append(room.id)\n else:\n # Convert from int to list\n rooms[room_name] = [room_values, room.id]\n\n if rooms:\n attributes[\"rooms\"] = rooms\n\n if self._last_error:\n attributes[\n LAST_ERROR\n ] = f\"{self._last_error.description} ({self._last_error.code})\"\n\n return attributes\n\n async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:\n \"\"\"Set fan speed.\"\"\"\n await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))\n\n async def async_return_to_base(self, **kwargs: Any) -> None:\n \"\"\"Set the vacuum cleaner to return to the dock.\"\"\"\n await self._vacuum_bot.execute_command(Charge())\n\n async def async_stop(self, **kwargs: Any) -> None:\n \"\"\"Stop the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))\n\n async def async_pause(self) -> None:\n \"\"\"Pause the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))\n\n async def async_start(self) -> None:\n \"\"\"Start the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(Clean(CleanAction.START))\n\n async def async_locate(self, **kwargs: Any) -> None:\n \"\"\"Locate the vacuum cleaner.\"\"\"\n await self._vacuum_bot.execute_command(PlaySound())\n\n async def async_send_command(\n self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any\n ) -> None:\n \"\"\"Send a command to a vacuum cleaner.\"\"\"\n _LOGGER.debug(\"async_send_command %s with %s\", command, params)\n\n if command in [\"relocate\", SetRelocationState.name]:\n _LOGGER.warning(\"DEPRECATED! Please use relocate button entity instead.\")\n await self._vacuum_bot.execute_command(SetRelocationState())\n elif command == \"auto_clean\":\n clean_type = params.get(\"type\", \"auto\") if params else \"auto\"\n if clean_type == \"auto\":\n _LOGGER.warning('DEPRECATED! Please use \"vacuum.start\" instead.')\n await self.async_start()\n elif command in [\"spot_area\", \"custom_area\", \"set_water\"]:\n if params is None:\n raise RuntimeError(\"Params are required!\")\n\n if command in \"spot_area\":\n await self._vacuum_bot.execute_command(\n CleanArea(\n mode=CleanMode.SPOT_AREA,\n area=str(params[\"rooms\"]),\n cleanings=params.get(\"cleanings\", 1),\n )\n )\n elif command == \"custom_area\":\n await self._vacuum_bot.execute_command(\n CleanArea(\n mode=CleanMode.CUSTOM_AREA,\n area=str(params[\"coordinates\"]),\n cleanings=params.get(\"cleanings\", 1),\n )\n )\n elif command == \"set_water\":\n _LOGGER.warning(\"DEPRECATED! 
Please use water select entity instead.\")\n await self._vacuum_bot.execute_command(SetWaterInfo(params[\"amount\"]))\n else:\n await self._vacuum_bot.execute_command(CustomCommand(command, params))\n\n async def _service_refresh(self, part: str) -> None:\n \"\"\"Service to manually refresh.\"\"\"\n _LOGGER.debug(\"Manually refresh %s\", part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s', part)\n", "step-ids": [ 5, 6, 9, 10, 13 ] }
[ 5, 6, 9, 10, 13 ]
# strspn(str1,str2)
str1 = '12345678'
str2 = '456'
# str1 and chars both in str1 and str2
print(str1 and str2)

str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
    if c in str2:
        nPos = str1.index(c)
        break
print(nPos)
normal
{ "blob_id": "5c30b0e952ddf2e05a7ad5f8d9bbd4f5e22f887d", "index": 62, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(str1 and str2)\n<mask token>\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "step-3": "str1 = '12345678'\nstr2 = '456'\nprint(str1 and str2)\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "step-4": "# strspn(str1,str2)\nstr1 = '12345678'\nstr2 = '456'\n# str1 and chars both in str1 and str2\nprint(str1 and str2)\n\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
a= input("Enter number") a= a.split() b=[] for x in a: b.append(int(x)) print(b) l=len(b) c=0 s=0 for i in range(l): s=len(b[:i]) for j in range(s): if b[s]<b[j]: c=b[s] b.pop(s) b.insert(b.index(b[j]),c) print(b,b[:i],b[s])
normal
{ "blob_id": "24de4f486d4e976850e94a003f8d9cbe3e518402", "index": 33, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor x in a:\n b.append(int(x))\nprint(b)\n<mask token>\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n", "step-3": "a = input('Enter number')\na = a.split()\nb = []\nfor x in a:\n b.append(int(x))\nprint(b)\nl = len(b)\nc = 0\ns = 0\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n", "step-4": "a= input(\"Enter number\")\r\na= a.split()\r\nb=[]\r\nfor x in a:\r\n b.append(int(x)) \r\n\r\nprint(b)\r\nl=len(b)\r\nc=0\r\ns=0\r\nfor i in range(l):\r\n s=len(b[:i])\r\n for j in range(s):\r\n \r\n if b[s]<b[j]:\r\n c=b[s]\r\n b.pop(s)\r\n b.insert(b.index(b[j]),c)\r\n print(b,b[:i],b[s])\r\n\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from PIL import Image, ImageDraw, ImageFont
import sys

### Create 1024,1024 pixel image with a white background.
img = Image.new("RGB", (1024, 1024), color=(255, 255, 255))

### Take text to be drawn on the image from the command terminal.
text = sys.argv[1]

### Chose favourite font and set size of the font.
fnt = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 150, encoding="unic")
d = ImageDraw.Draw(img)

d.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))

### Save image as .png file.
img.save(text + '.png')
normal
{ "blob_id": "053fa80c80d40cd28acb7d6a8bf1b2c30be9b36e", "index": 7786, "step-1": "<mask token>\n", "step-2": "<mask token>\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n", "step-3": "<mask token>\nimg = Image.new('RGB', (1024, 1024), color=(255, 255, 255))\ntext = sys.argv[1]\nfnt = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf',\n 150, encoding='unic')\nd = ImageDraw.Draw(img)\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n", "step-4": "from PIL import Image, ImageDraw, ImageFont\nimport sys\nimg = Image.new('RGB', (1024, 1024), color=(255, 255, 255))\ntext = sys.argv[1]\nfnt = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf',\n 150, encoding='unic')\nd = ImageDraw.Draw(img)\nd.text(xy=(320, 420), text=text, font=fnt, fill=(0, 0, 0))\nimg.save(text + '.png')\n", "step-5": "from PIL import Image, ImageDraw, ImageFont\nimport sys\n\n### Create 1024,1024 pixel image with a white background.\nimg = Image.new(\"RGB\", (1024, 1024), color = (255,255,255))\n\n### Take text to be drawn on the image from the command terminal.\ntext = sys.argv[1]\n\n### Chose favourite font and set size of the font.\nfnt = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", 150, encoding=\"unic\")\nd = ImageDraw.Draw(img)\n\nd.text(xy=(320,420), text = text , font = fnt, fill=(0,0,0))\n\n### Save image as .png file.\nimg.save(text+'.png')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators

from utils.views import render_to
from accounts.models import Account

from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload


def validate_email(value, row_number):
    error_message = _(u'Invalid e-mail address on "%d" line.')
    return validators.EmailValidator(
        validators.email_re,
        unicode(error_message % row_number),
        'invalid'
    )(value)


def upload_handler(file_obj, path_to_save):
    destination = open(path_to_save, 'wb+')
    for chunk in file_obj.chunks():
        destination.write(chunk)
    destination.close()


def get_externalsubscribers(file_obj):
    pass_count = 0
    fail_count = 0
    PATH = '/tmp/import_subscribers.xls'
    upload_handler(file_obj, PATH)
    sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
    for i in range(1, sheet.nrows):
        row = sheet.row(i)
        if not row[0].value:
            continue
        subscriber = {}
        subscriber['email'] = row[0].value
        try:
            validate_email(subscriber['email'].strip(), i)
            pass_count += 1
        except Exception as e:
            fail_count += 1
            #print e, u'"%s"' % subscriber['email']
            continue

        try:
            subscriber['first_name'] = row[1].value
        except IndexError:
            pass

        try:
            subscriber['last_name'] = row[2].value
        except IndexError:
            pass

        if not bool(Account.objects.filter(email=subscriber['email']).only('id')):
            obj, created = ExternalSubscriber.objects.get_or_create(
                email=subscriber['email'],
                defaults={
                    'first_name': subscriber.get('first_name'),
                    'last_name': subscriber.get('last_name'),
                }
            )
            if not created:
                for field in ['first_name', 'last_name']:
                    if subscriber.get(field) and\
                            getattr(obj, field) != subscriber.get(field):
                        setattr(obj, field, subscriber.get(field))
                obj.save()

    return pass_count, fail_count


@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
    if request.method == 'POST':
        form = ExternalSubscriberUpload(request.POST, request.FILES)
        if form.is_valid():
            passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
            messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed': passed, 'failed': failed})

            return redirect('admin:newsletter_externalsubscriber_changelist')
    else:
        form = ExternalSubscriberUpload()
    return {'form': form}
normal
{ "blob_id": "2ec41e02c95a270455c096e85829b7220eeda0c7", "index": 1317, "step-1": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\n<mask token>\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n 
PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n", "step-4": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\nfrom utils.views import render_to\nfrom accounts.models import Account\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, 
fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n", "step-5": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\n\nfrom utils.views import render_to\nfrom accounts.models import Account\n\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(\n validators.email_re,\n unicode(error_message % row_number),\n 'invalid'\n )(value)\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1,sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count+=1\n except Exception as e:\n fail_count+=1\n #print e, u'\"%s\"' % subscriber['email']\n continue\n\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n\n if not bool(Account.objects.filter(email=subscriber['email']).only('id')):\n obj, created = ExternalSubscriber.objects.get_or_create(\n email=subscriber['email'],\n defaults={\n 'first_name': subscriber.get('first_name'),\n 'last_name': subscriber.get('last_name'),\n }\n )\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and\\\n getattr(obj, field) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n\n return pass_count, fail_count\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed':passed, 'failed': failed})\n\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
#!/usr/bin/python
import math

def Main():
    try:
        radius = float(input("Please enter the radius: "))
        area = math.pi * radius**2
        print("Area =", area)
    except:
        print("You did not enter a number")

if __name__ == "__main__":
    Main()
normal
{ "blob_id": "33c4e0504425c5d22cefb9b4c798c3fd56a63771", "index": 3641, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef Main():\n try:\n radius = float(input('Please enter the radius: '))\n area = math.pi * radius ** 2\n print('Area =', area)\n except:\n print('You did not enter a number')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef Main():\n try:\n radius = float(input('Please enter the radius: '))\n area = math.pi * radius ** 2\n print('Area =', area)\n except:\n print('You did not enter a number')\n\n\nif __name__ == '__main__':\n Main()\n", "step-4": "import math\n\n\ndef Main():\n try:\n radius = float(input('Please enter the radius: '))\n area = math.pi * radius ** 2\n print('Area =', area)\n except:\n print('You did not enter a number')\n\n\nif __name__ == '__main__':\n Main()\n", "step-5": "#!/usr/bin/python\nimport math\n\ndef Main():\n\ttry:\n\t\tradius = float(input(\"Please enter the radius: \"))\n\t\tarea = math.pi * radius**2\n\t\tprint(\"Area =\", area)\n\texcept:\n\t\tprint(\"You did not enter a number\")\n\nif __name__ == \"__main__\":\n\tMain()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
""" Image Check / Compress Image""" import re import os from PIL import Image from common.constant import PATH def check_image(file_type): match = re.match("image/*", file_type) return match def compress_image(data): with open(PATH.format(data['name']), 'wb+') as file: file.write(data['binary']) image = Image.open(PATH.format(data['name'])) new_img = image.resize((128, 128)) new_img.save(PATH.format(data['name'])) with open(PATH.format(data['name']), 'rb') as image_file: image = image_file.read() os.remove(PATH.format(data['name'])) return image
normal
{ "blob_id": "13fa650557a4a8827c9fb2e514bed178df19a32c", "index": 1295, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n", "step-4": "<mask token>\nimport re\nimport os\nfrom PIL import Image\nfrom common.constant import PATH\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n", "step-5": "\"\"\" Image Check / Compress Image\"\"\"\n\nimport re\nimport os\nfrom PIL import Image\n\nfrom common.constant import PATH\n\n\ndef check_image(file_type):\n match = re.match(\"image/*\", file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Get only the odd numbers into another list:

my_list = [1, 4, 5, 6, 9, 13, 19, 21]

# Using a list comprehension:
lista_impares = [num for num in my_list if num % 2 != 0]
print(my_list)
print(lista_impares)
print('')

# Using filter:
lista_pares = list(filter(lambda x: x % 2 == 0, my_list))
print(my_list)
print(lista_pares)
normal
{ "blob_id": "e1913c80375e4871119182d0267e9f228818624f", "index": 4309, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(my_list)\nprint(lista_impares)\nprint('')\n<mask token>\nprint(my_list)\nprint(lista_pares)\n", "step-3": "my_list = [1, 4, 5, 6, 9, 13, 19, 21]\nlista_impares = [num for num in my_list if num % 2 != 0]\nprint(my_list)\nprint(lista_impares)\nprint('')\nlista_pares = list(filter(lambda x: x % 2 == 0, my_list))\nprint(my_list)\nprint(lista_pares)\n", "step-4": "# Obtener en otra lista unicamente números impares:\n\nmy_list = [1, 4, 5, 6, 9, 13, 19, 21]\n\n# Vamos a hacer una list comprehension:\nlista_impares = [num for num in my_list if num % 2 != 0]\nprint(my_list)\nprint(lista_impares)\nprint('')\n\n\n# Vamos a usar filter:\nlista_pares = list(filter(lambda x: x % 2 == 0 , my_list))\nprint(my_list)\nprint(lista_pares)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import re
from .models import ValidatedStudent
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User


def get_token_from_request(request):
    token_tuple = request.COOKIES.get('money_api_token')
    matches = re.search(r'(<Token: (\S*)>)', token_tuple)
    token = matches.groups(0)[1]
    return token


def get_student_from_request(request):
    current_token = get_token_from_request(request)
    current_user = Token.objects.filter(key=current_token).last().user
    current_email = User.objects.filter(username=current_user).last().email
    return ValidatedStudent.objects.filter(email=current_email).last()
normal
{ "blob_id": "2187f38dc9b14ecc355e98fe15d36fdefd548f04", "index": 1159, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n", "step-4": "import re\nfrom .models import ValidatedStudent\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n", "step-5": "import re\nfrom .models import ValidatedStudent\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search(r'(<Token: (\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Module for the bot""" from copy import deepcopy from time import sleep import mcpi.minecraft as minecraft from mcpi.vec3 import Vec3 import mcpi.block as block from search import SearchProblem, astar, bfs from singleton import singleton _AIR = block.AIR.id _WATER = block.WATER.id _LAVA = block.LAVA.id _BEDROCK = block.BEDROCK.id _DROP = 2 # It can drop at most this many _DROP_PLUS_1 = _DROP + 1 _DELAY = 1 class _Vec3(Vec3): """A Vec3 that is hashable. Everything in this program should use this class.""" def __hash__(self): """Return the hash.""" return hash((self.x, self.y, self.z)) def clone(self): """Return a clone.""" return _Vec3(self.x, self.y, self.z) class _GenericBot: """A generic bot.""" def __init__(self, pos, inventory=None): """Initialize with an empty inventory. inventory is a dictionary. If None, an empty one will be used.""" if inventory is None: self._inventory = {} else: self._inventory = deepcopy(inventory) self._pos = deepcopy(pos) def take_action(self, action): """Take the action (acquired from _get_legal_actions).""" getattr(self, action['func'])( *action.get('args', ()), **action.get('kwargs', {}) ) def take_actions(self, actions, seconds=None): """Take these actions. If seconds is not None, sleep 'seconds' seconds. """ if not actions: return self.take_action(actions[0]) for action in actions[1:]: if seconds is not None: sleep(seconds) self.take_action(action) def get_pos(self): """Return the position.""" return deepcopy(self._pos) def get_legal_actions(self, block_=None): """Return a list of legal actions. If block_ is None, return all legal actions. Otherwise, return all legal actions that don't involve placing the block.""" return self._get_move_actions(block_) + self._get_mine_actions() + \ self._get_placement_actions(block_) def contains(self, block_): """Return whether or not the bot contains the block id.""" return block_ in self._inventory def _get_block(self, pos): """Get the block at the position.""" raise NotImplementedError def _place(self, loc, exclude=None, block_=None): """Place a block from the inventory only. If exclude is not None, place a block that is not 'exclude'. If block is not None, place that block only. """ if not self._inventory: raise Exception('Inventory empty') if block_ is None: for key in self._inventory: if key != exclude: block_ = key break else: raise Exception(( 'You requested not to place %s, but it is the only ' 'block in the inventory.' % exclude )) if block_ not in self._inventory: raise Exception('Block %s is not in the inventory' % block_) if self._inventory[block_] == 1: del self._inventory[block_] else: self._inventory[block_] -= 1 self._set_block(loc, block_) def _move_down(self): """Move and mine the block below.""" new_pos = self._pos + _Vec3(0, -1, 0) block_ = self._get_block(new_pos) if block_ != _WATER: self._add_to_inv(block_) self._move(new_pos) def _add_to_inv(self, block_): """Add the block to the inventory.""" if block_ in self._inventory: self._inventory[block_] += 1 else: self._inventory[block_] = 1 def _move_up(self, exclude=None): """Move and place a block below. If exclude is not None, place a block that is not 'exclude'. """ self._move(self._pos + _Vec3(0, 1, 0)) self._place(self._pos + _Vec3(0, -1, 0), exclude) def _mine(self, loc): """Mine the block.""" block_ = self._get_block(loc) self._add_to_inv(block_) self._set_block(loc, _AIR) def _get_move_actions(self, exclude=None): """Return a list of legal movement actions. exclude is the block to exclude. 
""" rtn = [] # Check for moving up can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER} if can_move_up: if self._surrounded(): rtn.append({ 'func': '_move', 'args': (self._pos + _Vec3(0, 1, 0),) }) else: rtn.append({ 'func': '_move_up', 'args': (exclude,) }) # Check for moving down hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0)) if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}: rtn.append({'func': '_move_down'}) # Check for side moves for dir_ in _adj_dirs(): rtn.extend(self._side_moves(dir_, can_move_up)) return rtn def _side_moves(self, dir_, can_move_up): """Return the list of side moves. dir_ is an adjacent direction. can_move_up is a boolean for whether or not the bot can move up. """ rtn = [] base_pos = self._pos + dir_ base_block = self._get_block(base_pos) empty_blocks = {_AIR, _WATER} # Check if it can move up if can_move_up and base_block not in {_AIR, _LAVA, _WATER}: for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break else: rtn.append({ 'func': '_move', 'args': (base_pos + _Vec3(0, 1, 0),) }) # Check if it can move in that direction for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break # Fall else: pos = base_pos + _Vec3(0, -1, 0) for _ in xrange(_DROP_PLUS_1): block_ = self._get_block(pos) if block_ != _AIR: if block_ != _LAVA: rtn.append({ 'func': '_move', 'args': (pos + _Vec3(0, 1, 0),) }) break pos.y -= 1 def _surrounded(self): """Return whether or not the bot is surrounded by water.""" for dir_ in _adj_dirs(): if self._get_block(self._pos + dir_) != _WATER: return False return True def _get_mine_actions(self): """Return a list of legal mining actions (that only involve mining and not moving).""" rtn = [] dont_mine = {_AIR, _WATER, _LAVA} # Mine above. pos_above = self._pos + _Vec3(0, 2, 0) if self._get_block(pos_above) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos_above,) }) for dir_ in _adj_dirs(): pos = self._pos + dir_ for _ in xrange(2): if self._get_block(pos) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos,) }) pos = pos + _Vec3(0, 1, 0) return rtn def _get_placement_actions(self, exclude=None): """Return a list of legal actions that only involve placing a block from the inventory. exclude is a block id. It is the block that should not be placed. If None, any block can be placed.""" if not self._has_blocks_to_place(exclude=exclude): return [] dirs = [_Vec3(0, 2, 0)] for dir_ in _adj_dirs(): dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)]) if self._get_block(self._pos + dir_) in [_AIR, _WATER]: dirs.append(dir_ + _Vec3(0, -1, 0)) rtn = [] for dir_ in dirs: pos = self._pos + dir_ if self._can_place(pos): rtn.append({ 'func': '_place', 'args': (pos,), 'kwargs': {'exclude': exclude} }) return rtn def _can_place(self, loc): """Return whether or not the bot can place a block at that location independent of what it has in its inventory.""" non_blocks = [_AIR, _WATER, _LAVA] player = [self._pos, self._pos + _Vec3(0, 1, 0)] for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]: new_loc = loc + dir_ if new_loc not in player and self._get_block(new_loc) \ not in non_blocks: return True return False def _has_blocks_to_place(self, exclude=None): """Return whether or not the bot can place a block from the inventory. If exclude is None, any block can be placed.""" for block_ in self._inventory: if block_ != exclude: return True return False def _set_block(self, pos, block_): """Set a block. 
block_ is the block id.""" raise NotImplementedError def _move(self, pos): """Move there only.""" self._pos = deepcopy(pos) class _ImaginaryBot(_GenericBot): """A bot used for finding paths that doesn't actually change blocks in the world.""" def __init__(self, pos, inventory=None): """Create a new bot.""" _GenericBot.__init__(self, pos, inventory) self._changes = {} # Changes to the world def _set_block(self, pos, block_): """Set a block. block_ is the block id.""" self._changes[deepcopy(pos)] = block def _get_block(self, pos): """Get the block at the position.""" if pos in self._changes: return self._changes[pos] else: return _get_mc().getBlock(pos) def get_block(self, pos): """The public version.""" return self._get_block(pos) def __hash__(self): """Return the hash.""" return hash(frozenset([self._pos] + \ _key_vals(self._inventory) + \ _key_vals(self._changes) )) class Bot(_GenericBot): """The real bot. All vector arguments are Vec3s.""" _BOT_BLOCK = block.IRON_BLOCK.id def __init__(self): """Create a bot next to the player.""" pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0) pos = _Vec3(pos.x, pos.y, pos.z) _GenericBot.__init__(self, pos) self._pos = pos self._move(self._pos) @staticmethod def destroy_all(): """Destroy all bots within a small distance (in case I forget to destroy one).""" player_loc = _player_loc() minec = _get_mc() rad = 10 for x in xrange(player_loc.x - rad, player_loc.x + rad): for y in xrange(player_loc.y - rad, player_loc.y + rad): for z in xrange(player_loc.z - rad, player_loc.z + rad): if minec.getBlock(x, y, z) == Bot._BOT_BLOCK: minec.setBlock(x, y, z, _AIR) def destroy(self): """Set itself to air.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) def fetch(self, block_name): """Mine and return a block to the player.""" imag_bot = _ImaginaryBot(self._pos, self._inventory) block_id = getattr(block, block_name).id block_loc = self._get_block_loc(block_id) mine_prob = _MineProblem(imag_bot, block_loc, block_id) mine_actions = astar(mine_prob, _mine_heuristic) self.take_actions(mine_actions, _DELAY) imag_bot = _ImaginaryBot(self._pos, self._inventory) player_loc = _player_loc() return_prob = _ReturnProblem(imag_bot, block_id, player_loc) return_actions = astar(return_prob, _return_heuristic) imag_bot.take_actions(return_actions) return_actions.append({ 'func': '_place', 'args': (imag_bot.get_pos() + player_loc) / 2, 'kwargs': {'block': block_id} }) self.take_actions(return_actions, _DELAY) def _get_block_loc(self, block_id): """Return the location of the block.""" find_prob = FindProblem(self._pos, block_id) dirs = bfs(find_prob) return self._pos + sum(dirs) def _set_block(self, pos, block_): """Place an actual block in the world. block is a block id.""" _get_mc().setBlock(pos, block_) def _get_block(self, pos): """Get the block at the position.""" return _get_mc().getBlock(pos) def _move(self, pos): """Move there, and set the appropriate blocks.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) self._set_block(pos, self._BOT_BLOCK) self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK) self._pos = pos class FindProblem(SearchProblem): """Problem for finding the location of a block in the world. A state in this problem is a location. 
""" def __init__(self, start_loc, block_id): """Initialize.""" self._start_loc = deepcopy(start_loc) self._block_id = block_id def getStartState(self): """Return the starting location.""" return self._start_loc def isGoalState(self, state): return _get_mc().getBlock(state) == self._block_id def getSuccessors(self, state): """Return the successors.""" rtn = [] for dir_ in _all_dirs(): successor = state + dir_ if successor.y <= _get_mc().getHeight(successor.x, successor.z) \ and _get_mc().getBlock(successor) != _BEDROCK: rtn.append((successor, dir_, 1)) return rtn class _MineProblem(SearchProblem): """The problem of finding the block and mining it (not returning it).""" def __init__(self, imag_bot, block_loc, block_id): """Initialize the problem with an _ImaginaryBot. block_loc is a Vec3. """ self._bot = imag_bot self._block_loc = deepcopy(block_loc) self._block_id = block_id def get_block_loc(self): """Return the block location.""" return deepcopy(self._block_loc) def get_block_id(self): """Return the block it's trying to mine.""" return self._block_id def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot has the block.""" return state.contains(self._block_id) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn class _ReturnProblem(SearchProblem): """The problem of returning to the player. This does not place the block next to the player.""" def __init__(self, imag_bot, block_, player_loc): """Initialized the problem with an _ImaginaryBot. block is a block id.""" self._bot = imag_bot self._block = block_ self._player_loc = player_loc def get_player_loc(self): """Return the player location.""" return deepcopy(self._player_loc) def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot is next to the player.""" diff = state.get_pos() - self._player_loc return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \ abs(diff.x) + abs(diff.z) == 2 and \ state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \ (_AIR, _LAVA, _WATER) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(self._block): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn def _mine_heuristic(bot, problem): """Return the mining heuristic. bot is an _ImaginaryBot. """ if bot.contains(problem.get_block_id()): return 0 bot_pos = bot.get_pos() dest_pos = problem.get_block_loc() # If man == dy: return man + 1 # If man > dy: return man # If man < dy: return dy? man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z)) y_diff = bot_pos.y - dest_pos.y if y_diff < 0: y_diff += 1 if y_diff == 0: return man_dist # Transform so that it's only dropping drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) if man_dist > drops: return man_dist if man_dist == drops: return man_dist + 1 if drop == 1: return drops if y_diff % drop == 1: return drops return drops + 1 def _drops(dist, drop): """Return the number of times it takes to drop a distance dist. drop is the length of one drop. Both are assumed positive.""" rtn = dist / drop if dist % drop != 0: rtn += 1 return rtn def _return_heuristic(bot, problem): """Return the return heuristic. bot is an _ImaginaryBot. 
""" bot_pos = bot.get_pos() player_pos = problem.get_player_loc() bot_plane_pos = (bot.x, bot.z) y_diff = bot_pos.y - player_pos.y drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) min_man = float('inf') for dir_ in _adj_dirs(): loc = player_pos + 2 * dir_ man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z)) if man_dist < min_man: min_man = man_dist if man_dist < drops: return drops return min_man def _to_my_vec3(vec): """Return the _Vec3 alternative of the Vec3.""" return _Vec3(vec.x, vec.y, vec.z) def _player_loc(): """Return the player's location.""" return _to_my_vec3(_get_mc().player.getTilePos()) def _adj_dirs(): """Return the adjacent directions.""" return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)] def _all_dirs(): """Return all adjacent directions.""" return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)] def _manhattan(pos1, pos2): """Return the manhattan distance. pos1 and pos2 should be iterable.""" return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2)) @singleton def _get_mc(): """Return the Minecraft instance.""" return minecraft.Minecraft.create() def _key_vals(dict_): """Return a list of key-val tuples.""" return [(key, val) for key, val in dict_.iteritems()]
normal
{ "blob_id": "54f0ed5f705d5ada28721301f297b2b0058773ad", "index": 2, "step-1": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n <mask token>\n <mask token>\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n <mask token>\n <mask token>\n <mask token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 
0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <mask token>\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n <mask token>\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n <mask token>\n <mask token>\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n <mask token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <mask token>\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return 
a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n <mask token>\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n 
\"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. 
This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n 
return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. 
Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n 
return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n", "step-5": "\"\"\"Module for the bot\"\"\"\n\nfrom copy import deepcopy\nfrom time import sleep\n\nimport mcpi.minecraft as minecraft\nfrom mcpi.vec3 import Vec3\nimport mcpi.block as block\n\nfrom search import SearchProblem, astar, bfs\nfrom singleton import singleton\n\n_AIR = block.AIR.id\n_WATER = block.WATER.id\n_LAVA = block.LAVA.id\n_BEDROCK = block.BEDROCK.id\n\n_DROP = 2 # It can drop at most this many\n_DROP_PLUS_1 = _DROP + 1\n_DELAY = 1\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. 
If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception((\n 'You requested not to place %s, but it is the only '\n 'block in the inventory.' 
% exclude\n ))\n\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n\n self._set_block(loc, block_)\n \n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n \n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n\n # Check for moving up\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({\n 'func': '_move',\n 'args': (self._pos + _Vec3(0, 1, 0),)\n })\n else:\n rtn.append({\n 'func': '_move_up',\n 'args': (exclude,)\n })\n\n # Check for moving down\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n\n # Check for side moves \n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n\n # Check if it can move up\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({\n 'func': '_move',\n 'args': (base_pos + _Vec3(0, 1, 0),)\n })\n\n # Check if it can move in that direction\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n\n # Fall\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({\n 'func': '_move',\n 'args': (pos + _Vec3(0, 1, 0),)\n })\n break\n pos.y -= 1 \n \n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n # Mine above.\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': (pos_above,)\n })\n\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': 
(pos,)\n })\n pos = pos + _Vec3(0, 1, 0)\n\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc) \\\n not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {} # Changes to the world\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + \\\n _key_vals(self._inventory) + \\\n _key_vals(self._changes)\n ))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({\n 'func': '_place',\n 'args': (imag_bot.get_pos() + player_loc) / 2,\n 'kwargs': {'block': block_id}\n })\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n 
for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z) \\\n and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \\\n abs(diff.x) + abs(diff.z) == 2 and \\\n state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \\\n (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n\n # If man == dy: return man + 1\n # If man > dy: return man\n # If man < dy: return dy?\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n\n if y_diff == 0:\n return man_dist\n\n # Transform so that it's only dropping\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n\n drops = _drops(y_diff, drop)\n\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n \n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n \n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = (bot.x, bot.z)\n\n y_diff = bot_pos.y - player_pos.y\n\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n\n", "step-ids": [ 52, 53, 58, 60, 79 ] }
[ 52, 53, 58, 60, 79 ]
# -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter

#定义wandb参数
hyperparameter_defaults = dict(
    epochs=70,
    batch_train=40,
    batch_val=50,
    batch_test=40,
    lr=0.0002,
    weight_decay=0.0005,
    r=0.02
)

wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config


plt.rcParams['font.family'] = ['Times New Roman']

def to_percent(temp, position):
    return '%1.0f' % (temp) + '%'

# model initialization 参数初始化
def weight_init(m):
    class_name = m.__class__.__name__ #得到网络层的名字
    if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
        xavier_uniform_(m.weight.data)
    if class_name.find('Linear') != -1:
        xavier_uniform_(m.weight.data)

def batch_norm_init(m):

    class_name = m.__class__.__name__
    if class_name.find('BatchNorm') != -1:
        m.reset_running_stats()


# split train and split data
def data_split_train(data_set, label_set):
    data_set_train = []
    data_set_val = []
    label_set_train = []
    label_set_val = []

    for i in range(data_set.shape[0]): #行数 shape[2]通道数
        index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']
        np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习
        a = index[:int((data_set.shape[1]) * 0.8)]
        data = data_set[i] #第i行

        data_train = data[a]
        data_val = np.delete(data, a, 0)
        data_set_train.append(data_train)
        data_set_val.append(data_val)
        label_set_train.extend(label_set[i][:len(data_train)])
        label_set_val.extend(label_set[i][:len(data_val)])
    data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
    data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
    label_set_train = np.array(label_set_train)
    label_set_val = np.array(label_set_val)

    return data_set_train, data_set_val, label_set_train, label_set_val


# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
    global alpha
    #torch.cuda.empty_cache()

    length = len(train_dataset.tensors[0])
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)

    val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
    val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)

    t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True)  # 修改这里,保证两个训练集的迭代次数一致
    # t_loader_iter = iter(t_loader)

    val_loss_s = []
    val_loss_t = []
    val_acc_s = []
    val_acc_t = []
    cross_loss = [] #暂时不知道作用
    Source_Train_Acc=[]

    for epoch in range(config.epochs):
        # t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True)  # 修改这里,保证两个训练集的迭代次数一致
        t_loader_iter = iter(t_loader)

        model.train()
        for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
            p = float(index) / 20
            alpha = 2. / (1. + np.exp(-10 * p)) - 1
            t_data_train = t_loader_iter.next()
            s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
            t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
            s_label_train = s_label_train.long().to(device)

            s_domain_label = torch.zeros(config.batch_train).long().cuda()
            t_domain_label = torch.ones(config.batch_train).long().cuda()

            s_out_train, s_domain_out = model(s_data_train, alpha)
            t_out_train, t_domain_out = model(t_data_train, alpha)


            loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失
            loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失

            loss_c = criterion(s_out_train, s_label_train) #分类器损失
            loss = loss_c + (loss_domain_s + loss_domain_t)*0.02


            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pred_s = torch.argmax(s_out_train.data, 1)  # 返回指定维度最大值的序号 dim=1
            correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率
            acc = 100. * correct_s.item() / len(s_data_train)
            Source_Train_Acc.append(acc)
            wandb.log({"Source Train Acc": acc})

            if index % 2 == 0:
                print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
                      (epoch, config.epochs, (index + 1) * len(s_data_train), length,
                       100. * (config.batch_train * (index + 1) / length), loss_c.item(),
                       loss_domain_s.item() + loss_domain_t.item()
                       , acc))

        #validation
        model.eval()
        #源域验证
        correct_val_s = 0
        sum_loss_s = 0
        length_val_s = len(val_dataset_s)
        for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
            with torch.no_grad():
                s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
                s_label_val = s_label_val.long().to(device)

                output_val_s, _ = model(s_data_val, alpha)
                loss_s = criterion(output_val_s, s_label_val)

                pred_val_s = torch.argmax(output_val_s.data, 1)
                correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
                sum_loss_s += loss_s
        acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率
        average_loss_s = sum_loss_s.item() / length_val_s #源域损失

        #目标域验证
        correct_val_t = 0
        sum_loss_t = 0
        length_val_t = len(val_dataset_t)
        for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
            with torch.no_grad():
                t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
                t_label_val = t_label_val.long().to(device)

                output_val_t, _ = model(t_data_val, alpha)
                loss_t = criterion(output_val_t, t_label_val)

                pred_val_t = torch.argmax(output_val_t.data, 1)
                correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
                sum_loss_t += loss_t
        acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率
        average_loss_t = sum_loss_t.item() / length_val_t #目标域损失

        metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
        wandb.log(metrics)


        print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
            epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))

        val_loss_s.append(loss_s.item())
        val_loss_t.append(loss_t.item())
        val_acc_t.append(acc_t)
        val_acc_s.append(acc_s)

    torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))

    #画出验证集正确率曲线
    plt.plot(val_acc_s, 'r-',marker='s')
    plt.plot(val_acc_t, 'g-',marker='*')
    plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
    plt.xlabel('Epochs')
    plt.ylabel('validation accuracy')
    plt.title('Source doamin & Target domain Validation Accuracy Rate')
    plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
    plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
    plt.show()

    #画出验证集损失
    plt.plot(val_loss_s, 'r-',marker='o')
    plt.plot(val_loss_t, 'g-',marker='x')
    plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
    plt.xlabel('Epochs')
    plt.ylabel('val_loss')
    plt.title('Source domain & Target domain Validation Loss')
    plt.savefig("Source domain & Target domain Validation Loss")
    plt.show()


# testing
def test(test_dataset):
    model.eval()
    length = len(test_dataset)
    correct = 0
    test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)

    y_test = []
    y_pred = []

    for index, (data, label) in enumerate(test_loader):
        with torch.no_grad():
            data = data.float().to(device)
            label = label.long().to(device)
            y_test.append(label)

            output, _ = model(data.unsqueeze(dim=1), alpha)
            pred = torch.argmax(output.data, 1)
            y_pred.append(pred)
            correct += pred.eq(label).cpu().sum()

    acc = 100. * correct / length
    return acc


if __name__ == '__main__':
    torch.cuda.empty_cache()
    # use cpu or gpu
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    device = torch.device(device)

    # CWRU
    dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
    dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
    dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
    dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')

    data_s_train_val = dataset_s_train['data']
    data_s_test = dataset_s_test['data'].reshape(-1, 1024)
    data_t_train_val = dataset_t_train['data']
    data_t_test = dataset_t_test['data'].reshape(-1, 1024)
    label_s_train_val = dataset_s_train['label']
    label_s_test = dataset_s_test['label'].reshape(1, -1)
    label_t_train_val = dataset_t_train['label']
    label_t_test = dataset_t_test['label'].reshape(1, -1)

    iteration_acc = []

    test_acc_s = []


    # repeat several times for an average result
    for iteration in range(1):
        # load model
        model = WDCNN1(C_in=1, class_num=10).to(device)
        model.apply(weight_init)
        model.apply(batch_norm_init)

        # train/val
        data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
        data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)

        # transfer ndarray to tensor
        data_s_train = torch.from_numpy(data_s_train)
        data_s_val = torch.from_numpy(data_s_val)
        data_t_val = torch.from_numpy(data_t_val) #加的验证
        data_s_test = torch.from_numpy(data_s_test)

        data_t_train = torch.from_numpy(data_t_train)
        data_t_test = torch.from_numpy(data_t_test)

        label_s_train = torch.from_numpy(label_s_train)
        label_s_val = torch.from_numpy(label_s_val)
        label_t_val = torch.from_numpy(label_t_val) #加的验证
        label_s_test = torch.from_numpy(label_s_test)
        #label_t_train = torch.from_numpy(label_t_train)
        label_t_test = torch.from_numpy(label_t_test)

        # seal to data-set
        train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
        train_dataset_t = Data.TensorDataset(data_t_train)
        val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
        val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证
        test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
        test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())

        # print(train_dataset_s, val_dataset_s)
        criterion = nn.NLLLoss()

        train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
        s_test_acc = test(test_dataset_s)
        t_test_acc = test(test_dataset_t)
        print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))

    wandb.finish()
normal
{ "blob_id": "fd45657083942dee13f9939ce2a4b71ba3f67397", "index": 3587, "step-1": "<mask token>\n\n\ndef weight_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n xavier_uniform_(m.weight.data)\n if class_name.find('Linear') != -1:\n xavier_uniform_(m.weight.data)\n\n\n<mask token>\n\n\ndef data_split_train(data_set, label_set):\n data_set_train = []\n data_set_val = []\n label_set_train = []\n label_set_val = []\n for i in range(data_set.shape[0]):\n index = np.arange(data_set.shape[1])\n np.random.shuffle(index)\n a = index[:int(data_set.shape[1] * 0.8)]\n data = data_set[i]\n data_train = data[a]\n data_val = np.delete(data, a, 0)\n data_set_train.append(data_train)\n data_set_val.append(data_val)\n label_set_train.extend(label_set[i][:len(data_train)])\n label_set_val.extend(label_set[i][:len(data_val)])\n data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])\n data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])\n label_set_train = np.array(label_set_train)\n label_set_val = np.array(label_set_val)\n return data_set_train, data_set_val, label_set_train, label_set_val\n\n\n<mask token>\n\n\ndef test(test_dataset):\n model.eval()\n length = len(test_dataset)\n correct = 0\n test_loader = Data.DataLoader(test_dataset, batch_size=config.\n batch_test, shuffle=False)\n y_test = []\n y_pred = []\n for index, (data, label) in enumerate(test_loader):\n with torch.no_grad():\n data = data.float().to(device)\n label = label.long().to(device)\n y_test.append(label)\n output, _ = model(data.unsqueeze(dim=1), alpha)\n pred = torch.argmax(output.data, 1)\n y_pred.append(pred)\n correct += pred.eq(label).cpu().sum()\n acc = 100.0 * correct / length\n return acc\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef weight_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n xavier_uniform_(m.weight.data)\n if class_name.find('Linear') != -1:\n xavier_uniform_(m.weight.data)\n\n\ndef batch_norm_init(m):\n class_name = m.__class__.__name__\n if class_name.find('BatchNorm') != -1:\n m.reset_running_stats()\n\n\ndef data_split_train(data_set, label_set):\n data_set_train = []\n data_set_val = []\n label_set_train = []\n label_set_val = []\n for i in range(data_set.shape[0]):\n index = np.arange(data_set.shape[1])\n np.random.shuffle(index)\n a = index[:int(data_set.shape[1] * 0.8)]\n data = data_set[i]\n data_train = data[a]\n data_val = np.delete(data, a, 0)\n data_set_train.append(data_train)\n data_set_val.append(data_val)\n label_set_train.extend(label_set[i][:len(data_train)])\n label_set_val.extend(label_set[i][:len(data_val)])\n data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])\n data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])\n label_set_train = np.array(label_set_train)\n label_set_val = np.array(label_set_val)\n return data_set_train, data_set_val, label_set_train, label_set_val\n\n\ndef train(train_dataset, val_dataset_s, val_dataset_t, train_dataset_t):\n global alpha\n length = len(train_dataset.tensors[0])\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr,\n weight_decay=config.weight_decay)\n train_dataloader = Data.DataLoader(train_dataset, batch_size=config.\n batch_train, shuffle=True)\n val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.\n batch_val, shuffle=False)\n val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.\n batch_val, shuffle=False)\n t_loader = Data.DataLoader(train_dataset_t, 
batch_size=int(config.\n batch_train), shuffle=True)\n val_loss_s = []\n val_loss_t = []\n val_acc_s = []\n val_acc_t = []\n cross_loss = []\n Source_Train_Acc = []\n for epoch in range(config.epochs):\n t_loader_iter = iter(t_loader)\n model.train()\n for index, (s_data_train, s_label_train) in enumerate(train_dataloader\n ):\n p = float(index) / 20\n alpha = 2.0 / (1.0 + np.exp(-10 * p)) - 1\n t_data_train = t_loader_iter.next()\n s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)\n t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)\n s_label_train = s_label_train.long().to(device)\n s_domain_label = torch.zeros(config.batch_train).long().cuda()\n t_domain_label = torch.ones(config.batch_train).long().cuda()\n s_out_train, s_domain_out = model(s_data_train, alpha)\n t_out_train, t_domain_out = model(t_data_train, alpha)\n loss_domain_s = criterion(s_domain_out, s_domain_label)\n loss_domain_t = criterion(t_domain_out, t_domain_label)\n loss_c = criterion(s_out_train, s_label_train)\n loss = loss_c + (loss_domain_s + loss_domain_t) * 0.02\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n pred_s = torch.argmax(s_out_train.data, 1)\n correct_s = pred_s.eq(s_label_train).cpu().sum()\n acc = 100.0 * correct_s.item() / len(s_data_train)\n Source_Train_Acc.append(acc)\n wandb.log({'Source Train Acc': acc})\n if index % 2 == 0:\n print(\n 'Train Epoch: {}/{} [{}/{} ({:.0f}%)] \\t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'\n .format(epoch, config.epochs, (index + 1) * len(\n s_data_train), length, 100.0 * (config.batch_train * (\n index + 1) / length), loss_c.item(), loss_domain_s.item\n () + loss_domain_t.item(), acc))\n model.eval()\n correct_val_s = 0\n sum_loss_s = 0\n length_val_s = len(val_dataset_s)\n for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):\n with torch.no_grad():\n s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)\n s_label_val = s_label_val.long().to(device)\n output_val_s, _ = model(s_data_val, alpha)\n loss_s = criterion(output_val_s, s_label_val)\n pred_val_s = torch.argmax(output_val_s.data, 1)\n correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()\n sum_loss_s += loss_s\n acc_s = 100.0 * correct_val_s.item() / length_val_s\n average_loss_s = sum_loss_s.item() / length_val_s\n correct_val_t = 0\n sum_loss_t = 0\n length_val_t = len(val_dataset_t)\n for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):\n with torch.no_grad():\n t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)\n t_label_val = t_label_val.long().to(device)\n output_val_t, _ = model(t_data_val, alpha)\n loss_t = criterion(output_val_t, t_label_val)\n pred_val_t = torch.argmax(output_val_t.data, 1)\n correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()\n sum_loss_t += loss_t\n acc_t = 100.0 * correct_val_t.item() / length_val_t\n average_loss_t = sum_loss_t.item() / length_val_t\n metrics = {'Acc_val_t': acc_t, 'epoch': epoch}\n wandb.log(metrics)\n print(\n \"\"\"\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%\"\"\"\n .format(epoch, config.epochs, average_loss_s, acc_s,\n average_loss_t, acc_t))\n val_loss_s.append(loss_s.item())\n val_loss_t.append(loss_t.item())\n val_acc_t.append(acc_t)\n val_acc_s.append(acc_s)\n torch.save(model.state_dict(), os.path.join(wandb.run.dir, 'model.pth'))\n plt.plot(val_acc_s, 'r-', marker='s')\n plt.plot(val_acc_t, 'g-', marker='*')\n plt.legend(['Source domain validation accuracy',\n 'Target 
domain validation accuracy'])\n plt.xlabel('Epochs')\n plt.ylabel('validation accuracy')\n plt.title('Source doamin & Target domain Validation Accuracy Rate')\n plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))\n plt.savefig('Source doamin & Target domain Validation Accuracy Rate.png')\n plt.show()\n plt.plot(val_loss_s, 'r-', marker='o')\n plt.plot(val_loss_t, 'g-', marker='x')\n plt.legend(['Source domain validation Loss',\n 'Target domain validation Loss'])\n plt.xlabel('Epochs')\n plt.ylabel('val_loss')\n plt.title('Source domain & Target domain Validation Loss')\n plt.savefig('Source domain & Target domain Validation Loss')\n plt.show()\n\n\ndef test(test_dataset):\n model.eval()\n length = len(test_dataset)\n correct = 0\n test_loader = Data.DataLoader(test_dataset, batch_size=config.\n batch_test, shuffle=False)\n y_test = []\n y_pred = []\n for index, (data, label) in enumerate(test_loader):\n with torch.no_grad():\n data = data.float().to(device)\n label = label.long().to(device)\n y_test.append(label)\n output, _ = model(data.unsqueeze(dim=1), alpha)\n pred = torch.argmax(output.data, 1)\n y_pred.append(pred)\n correct += pred.eq(label).cpu().sum()\n acc = 100.0 * correct / length\n return acc\n\n\n<mask token>\n", "step-3": "<mask token>\nwandb.init(config=hyperparameter_defaults, project='WDCNN-DANN')\n<mask token>\n\n\ndef to_percent(temp, position):\n return '%1.0f' % temp + '%'\n\n\ndef weight_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n xavier_uniform_(m.weight.data)\n if class_name.find('Linear') != -1:\n xavier_uniform_(m.weight.data)\n\n\ndef batch_norm_init(m):\n class_name = m.__class__.__name__\n if class_name.find('BatchNorm') != -1:\n m.reset_running_stats()\n\n\ndef data_split_train(data_set, label_set):\n data_set_train = []\n data_set_val = []\n label_set_train = []\n label_set_val = []\n for i in range(data_set.shape[0]):\n index = np.arange(data_set.shape[1])\n np.random.shuffle(index)\n a = index[:int(data_set.shape[1] * 0.8)]\n data = data_set[i]\n data_train = data[a]\n data_val = np.delete(data, a, 0)\n data_set_train.append(data_train)\n data_set_val.append(data_val)\n label_set_train.extend(label_set[i][:len(data_train)])\n label_set_val.extend(label_set[i][:len(data_val)])\n data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])\n data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])\n label_set_train = np.array(label_set_train)\n label_set_val = np.array(label_set_val)\n return data_set_train, data_set_val, label_set_train, label_set_val\n\n\ndef train(train_dataset, val_dataset_s, val_dataset_t, train_dataset_t):\n global alpha\n length = len(train_dataset.tensors[0])\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr,\n weight_decay=config.weight_decay)\n train_dataloader = Data.DataLoader(train_dataset, batch_size=config.\n batch_train, shuffle=True)\n val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.\n batch_val, shuffle=False)\n val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.\n batch_val, shuffle=False)\n t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.\n batch_train), shuffle=True)\n val_loss_s = []\n val_loss_t = []\n val_acc_s = []\n val_acc_t = []\n cross_loss = []\n Source_Train_Acc = []\n for epoch in range(config.epochs):\n t_loader_iter = iter(t_loader)\n model.train()\n for index, (s_data_train, s_label_train) in enumerate(train_dataloader\n ):\n p = float(index) / 20\n alpha = 2.0 / 
(1.0 + np.exp(-10 * p)) - 1\n t_data_train = t_loader_iter.next()\n s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)\n t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)\n s_label_train = s_label_train.long().to(device)\n s_domain_label = torch.zeros(config.batch_train).long().cuda()\n t_domain_label = torch.ones(config.batch_train).long().cuda()\n s_out_train, s_domain_out = model(s_data_train, alpha)\n t_out_train, t_domain_out = model(t_data_train, alpha)\n loss_domain_s = criterion(s_domain_out, s_domain_label)\n loss_domain_t = criterion(t_domain_out, t_domain_label)\n loss_c = criterion(s_out_train, s_label_train)\n loss = loss_c + (loss_domain_s + loss_domain_t) * 0.02\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n pred_s = torch.argmax(s_out_train.data, 1)\n correct_s = pred_s.eq(s_label_train).cpu().sum()\n acc = 100.0 * correct_s.item() / len(s_data_train)\n Source_Train_Acc.append(acc)\n wandb.log({'Source Train Acc': acc})\n if index % 2 == 0:\n print(\n 'Train Epoch: {}/{} [{}/{} ({:.0f}%)] \\t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'\n .format(epoch, config.epochs, (index + 1) * len(\n s_data_train), length, 100.0 * (config.batch_train * (\n index + 1) / length), loss_c.item(), loss_domain_s.item\n () + loss_domain_t.item(), acc))\n model.eval()\n correct_val_s = 0\n sum_loss_s = 0\n length_val_s = len(val_dataset_s)\n for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):\n with torch.no_grad():\n s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)\n s_label_val = s_label_val.long().to(device)\n output_val_s, _ = model(s_data_val, alpha)\n loss_s = criterion(output_val_s, s_label_val)\n pred_val_s = torch.argmax(output_val_s.data, 1)\n correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()\n sum_loss_s += loss_s\n acc_s = 100.0 * correct_val_s.item() / length_val_s\n average_loss_s = sum_loss_s.item() / length_val_s\n correct_val_t = 0\n sum_loss_t = 0\n length_val_t = len(val_dataset_t)\n for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):\n with torch.no_grad():\n t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)\n t_label_val = t_label_val.long().to(device)\n output_val_t, _ = model(t_data_val, alpha)\n loss_t = criterion(output_val_t, t_label_val)\n pred_val_t = torch.argmax(output_val_t.data, 1)\n correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()\n sum_loss_t += loss_t\n acc_t = 100.0 * correct_val_t.item() / length_val_t\n average_loss_t = sum_loss_t.item() / length_val_t\n metrics = {'Acc_val_t': acc_t, 'epoch': epoch}\n wandb.log(metrics)\n print(\n \"\"\"\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%\"\"\"\n .format(epoch, config.epochs, average_loss_s, acc_s,\n average_loss_t, acc_t))\n val_loss_s.append(loss_s.item())\n val_loss_t.append(loss_t.item())\n val_acc_t.append(acc_t)\n val_acc_s.append(acc_s)\n torch.save(model.state_dict(), os.path.join(wandb.run.dir, 'model.pth'))\n plt.plot(val_acc_s, 'r-', marker='s')\n plt.plot(val_acc_t, 'g-', marker='*')\n plt.legend(['Source domain validation accuracy',\n 'Target domain validation accuracy'])\n plt.xlabel('Epochs')\n plt.ylabel('validation accuracy')\n plt.title('Source doamin & Target domain Validation Accuracy Rate')\n plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))\n plt.savefig('Source doamin & Target domain Validation Accuracy Rate.png')\n plt.show()\n plt.plot(val_loss_s, 'r-', marker='o')\n 
plt.plot(val_loss_t, 'g-', marker='x')\n plt.legend(['Source domain validation Loss',\n 'Target domain validation Loss'])\n plt.xlabel('Epochs')\n plt.ylabel('val_loss')\n plt.title('Source domain & Target domain Validation Loss')\n plt.savefig('Source domain & Target domain Validation Loss')\n plt.show()\n\n\ndef test(test_dataset):\n model.eval()\n length = len(test_dataset)\n correct = 0\n test_loader = Data.DataLoader(test_dataset, batch_size=config.\n batch_test, shuffle=False)\n y_test = []\n y_pred = []\n for index, (data, label) in enumerate(test_loader):\n with torch.no_grad():\n data = data.float().to(device)\n label = label.long().to(device)\n y_test.append(label)\n output, _ = model(data.unsqueeze(dim=1), alpha)\n pred = torch.argmax(output.data, 1)\n y_pred.append(pred)\n correct += pred.eq(label).cpu().sum()\n acc = 100.0 * correct / length\n return acc\n\n\nif __name__ == '__main__':\n torch.cuda.empty_cache()\n if torch.cuda.is_available():\n device = 'cuda'\n else:\n device = 'cpu'\n device = torch.device(device)\n dataset_s_train = np.load('bearing numpy data\\\\dataset_train_0HP_100.npz')\n dataset_s_test = np.load('bearing numpy data\\\\dataset_val_0HP_80.npz')\n dataset_t_train = np.load('bearing numpy data\\\\dataset_train_3HP_100.npz')\n dataset_t_test = np.load('bearing numpy data\\\\dataset_val_3HP_80.npz')\n data_s_train_val = dataset_s_train['data']\n data_s_test = dataset_s_test['data'].reshape(-1, 1024)\n data_t_train_val = dataset_t_train['data']\n data_t_test = dataset_t_test['data'].reshape(-1, 1024)\n label_s_train_val = dataset_s_train['label']\n label_s_test = dataset_s_test['label'].reshape(1, -1)\n label_t_train_val = dataset_t_train['label']\n label_t_test = dataset_t_test['label'].reshape(1, -1)\n iteration_acc = []\n test_acc_s = []\n for iteration in range(1):\n model = WDCNN1(C_in=1, class_num=10).to(device)\n model.apply(weight_init)\n model.apply(batch_norm_init)\n data_s_train, data_s_val, label_s_train, label_s_val = (\n data_split_train(data_s_train_val, label_s_train_val))\n data_t_train, data_t_val, _, label_t_val = data_split_train(\n data_t_train_val, label_t_train_val)\n data_s_train = torch.from_numpy(data_s_train)\n data_s_val = torch.from_numpy(data_s_val)\n data_t_val = torch.from_numpy(data_t_val)\n data_s_test = torch.from_numpy(data_s_test)\n data_t_train = torch.from_numpy(data_t_train)\n data_t_test = torch.from_numpy(data_t_test)\n label_s_train = torch.from_numpy(label_s_train)\n label_s_val = torch.from_numpy(label_s_val)\n label_t_val = torch.from_numpy(label_t_val)\n label_s_test = torch.from_numpy(label_s_test)\n label_t_test = torch.from_numpy(label_t_test)\n train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)\n train_dataset_t = Data.TensorDataset(data_t_train)\n val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)\n val_dataset_t = Data.TensorDataset(data_t_val, label_t_val)\n test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze()\n )\n test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze()\n )\n criterion = nn.NLLLoss()\n train(train_dataset_s, val_dataset_s, val_dataset_t, train_dataset_t)\n s_test_acc = test(test_dataset_s)\n t_test_acc = test(test_dataset_t)\n print('\\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(\n s_test_acc, t_test_acc))\n wandb.finish()\n", "step-4": "import torch\nimport numpy as np\nimport torch.nn as nn\nimport argparse\nfrom model import WDCNN1\nfrom torch.nn.init import xavier_uniform_\nimport torch.utils.data as Data\nimport 
matplotlib.pylab as plt\nimport wandb\nimport os\nfrom matplotlib.ticker import FuncFormatter\nhyperparameter_defaults = dict(epochs=70, batch_train=40, batch_val=50,\n batch_test=40, lr=0.0002, weight_decay=0.0005, r=0.02)\nwandb.init(config=hyperparameter_defaults, project='WDCNN-DANN')\nconfig = wandb.config\nplt.rcParams['font.family'] = ['Times New Roman']\n\n\ndef to_percent(temp, position):\n return '%1.0f' % temp + '%'\n\n\ndef weight_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n xavier_uniform_(m.weight.data)\n if class_name.find('Linear') != -1:\n xavier_uniform_(m.weight.data)\n\n\ndef batch_norm_init(m):\n class_name = m.__class__.__name__\n if class_name.find('BatchNorm') != -1:\n m.reset_running_stats()\n\n\ndef data_split_train(data_set, label_set):\n data_set_train = []\n data_set_val = []\n label_set_train = []\n label_set_val = []\n for i in range(data_set.shape[0]):\n index = np.arange(data_set.shape[1])\n np.random.shuffle(index)\n a = index[:int(data_set.shape[1] * 0.8)]\n data = data_set[i]\n data_train = data[a]\n data_val = np.delete(data, a, 0)\n data_set_train.append(data_train)\n data_set_val.append(data_val)\n label_set_train.extend(label_set[i][:len(data_train)])\n label_set_val.extend(label_set[i][:len(data_val)])\n data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])\n data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])\n label_set_train = np.array(label_set_train)\n label_set_val = np.array(label_set_val)\n return data_set_train, data_set_val, label_set_train, label_set_val\n\n\ndef train(train_dataset, val_dataset_s, val_dataset_t, train_dataset_t):\n global alpha\n length = len(train_dataset.tensors[0])\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr,\n weight_decay=config.weight_decay)\n train_dataloader = Data.DataLoader(train_dataset, batch_size=config.\n batch_train, shuffle=True)\n val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.\n batch_val, shuffle=False)\n val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.\n batch_val, shuffle=False)\n t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.\n batch_train), shuffle=True)\n val_loss_s = []\n val_loss_t = []\n val_acc_s = []\n val_acc_t = []\n cross_loss = []\n Source_Train_Acc = []\n for epoch in range(config.epochs):\n t_loader_iter = iter(t_loader)\n model.train()\n for index, (s_data_train, s_label_train) in enumerate(train_dataloader\n ):\n p = float(index) / 20\n alpha = 2.0 / (1.0 + np.exp(-10 * p)) - 1\n t_data_train = t_loader_iter.next()\n s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)\n t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)\n s_label_train = s_label_train.long().to(device)\n s_domain_label = torch.zeros(config.batch_train).long().cuda()\n t_domain_label = torch.ones(config.batch_train).long().cuda()\n s_out_train, s_domain_out = model(s_data_train, alpha)\n t_out_train, t_domain_out = model(t_data_train, alpha)\n loss_domain_s = criterion(s_domain_out, s_domain_label)\n loss_domain_t = criterion(t_domain_out, t_domain_label)\n loss_c = criterion(s_out_train, s_label_train)\n loss = loss_c + (loss_domain_s + loss_domain_t) * 0.02\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n pred_s = torch.argmax(s_out_train.data, 1)\n correct_s = pred_s.eq(s_label_train).cpu().sum()\n acc = 100.0 * correct_s.item() / len(s_data_train)\n Source_Train_Acc.append(acc)\n wandb.log({'Source Train Acc': acc})\n 
if index % 2 == 0:\n print(\n 'Train Epoch: {}/{} [{}/{} ({:.0f}%)] \\t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'\n .format(epoch, config.epochs, (index + 1) * len(\n s_data_train), length, 100.0 * (config.batch_train * (\n index + 1) / length), loss_c.item(), loss_domain_s.item\n () + loss_domain_t.item(), acc))\n model.eval()\n correct_val_s = 0\n sum_loss_s = 0\n length_val_s = len(val_dataset_s)\n for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):\n with torch.no_grad():\n s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)\n s_label_val = s_label_val.long().to(device)\n output_val_s, _ = model(s_data_val, alpha)\n loss_s = criterion(output_val_s, s_label_val)\n pred_val_s = torch.argmax(output_val_s.data, 1)\n correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()\n sum_loss_s += loss_s\n acc_s = 100.0 * correct_val_s.item() / length_val_s\n average_loss_s = sum_loss_s.item() / length_val_s\n correct_val_t = 0\n sum_loss_t = 0\n length_val_t = len(val_dataset_t)\n for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):\n with torch.no_grad():\n t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)\n t_label_val = t_label_val.long().to(device)\n output_val_t, _ = model(t_data_val, alpha)\n loss_t = criterion(output_val_t, t_label_val)\n pred_val_t = torch.argmax(output_val_t.data, 1)\n correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()\n sum_loss_t += loss_t\n acc_t = 100.0 * correct_val_t.item() / length_val_t\n average_loss_t = sum_loss_t.item() / length_val_t\n metrics = {'Acc_val_t': acc_t, 'epoch': epoch}\n wandb.log(metrics)\n print(\n \"\"\"\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%\"\"\"\n .format(epoch, config.epochs, average_loss_s, acc_s,\n average_loss_t, acc_t))\n val_loss_s.append(loss_s.item())\n val_loss_t.append(loss_t.item())\n val_acc_t.append(acc_t)\n val_acc_s.append(acc_s)\n torch.save(model.state_dict(), os.path.join(wandb.run.dir, 'model.pth'))\n plt.plot(val_acc_s, 'r-', marker='s')\n plt.plot(val_acc_t, 'g-', marker='*')\n plt.legend(['Source domain validation accuracy',\n 'Target domain validation accuracy'])\n plt.xlabel('Epochs')\n plt.ylabel('validation accuracy')\n plt.title('Source doamin & Target domain Validation Accuracy Rate')\n plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))\n plt.savefig('Source doamin & Target domain Validation Accuracy Rate.png')\n plt.show()\n plt.plot(val_loss_s, 'r-', marker='o')\n plt.plot(val_loss_t, 'g-', marker='x')\n plt.legend(['Source domain validation Loss',\n 'Target domain validation Loss'])\n plt.xlabel('Epochs')\n plt.ylabel('val_loss')\n plt.title('Source domain & Target domain Validation Loss')\n plt.savefig('Source domain & Target domain Validation Loss')\n plt.show()\n\n\ndef test(test_dataset):\n model.eval()\n length = len(test_dataset)\n correct = 0\n test_loader = Data.DataLoader(test_dataset, batch_size=config.\n batch_test, shuffle=False)\n y_test = []\n y_pred = []\n for index, (data, label) in enumerate(test_loader):\n with torch.no_grad():\n data = data.float().to(device)\n label = label.long().to(device)\n y_test.append(label)\n output, _ = model(data.unsqueeze(dim=1), alpha)\n pred = torch.argmax(output.data, 1)\n y_pred.append(pred)\n correct += pred.eq(label).cpu().sum()\n acc = 100.0 * correct / length\n return acc\n\n\nif __name__ == '__main__':\n torch.cuda.empty_cache()\n if torch.cuda.is_available():\n device = 'cuda'\n else:\n device = 
'cpu'\n device = torch.device(device)\n dataset_s_train = np.load('bearing numpy data\\\\dataset_train_0HP_100.npz')\n dataset_s_test = np.load('bearing numpy data\\\\dataset_val_0HP_80.npz')\n dataset_t_train = np.load('bearing numpy data\\\\dataset_train_3HP_100.npz')\n dataset_t_test = np.load('bearing numpy data\\\\dataset_val_3HP_80.npz')\n data_s_train_val = dataset_s_train['data']\n data_s_test = dataset_s_test['data'].reshape(-1, 1024)\n data_t_train_val = dataset_t_train['data']\n data_t_test = dataset_t_test['data'].reshape(-1, 1024)\n label_s_train_val = dataset_s_train['label']\n label_s_test = dataset_s_test['label'].reshape(1, -1)\n label_t_train_val = dataset_t_train['label']\n label_t_test = dataset_t_test['label'].reshape(1, -1)\n iteration_acc = []\n test_acc_s = []\n for iteration in range(1):\n model = WDCNN1(C_in=1, class_num=10).to(device)\n model.apply(weight_init)\n model.apply(batch_norm_init)\n data_s_train, data_s_val, label_s_train, label_s_val = (\n data_split_train(data_s_train_val, label_s_train_val))\n data_t_train, data_t_val, _, label_t_val = data_split_train(\n data_t_train_val, label_t_train_val)\n data_s_train = torch.from_numpy(data_s_train)\n data_s_val = torch.from_numpy(data_s_val)\n data_t_val = torch.from_numpy(data_t_val)\n data_s_test = torch.from_numpy(data_s_test)\n data_t_train = torch.from_numpy(data_t_train)\n data_t_test = torch.from_numpy(data_t_test)\n label_s_train = torch.from_numpy(label_s_train)\n label_s_val = torch.from_numpy(label_s_val)\n label_t_val = torch.from_numpy(label_t_val)\n label_s_test = torch.from_numpy(label_s_test)\n label_t_test = torch.from_numpy(label_t_test)\n train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)\n train_dataset_t = Data.TensorDataset(data_t_train)\n val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)\n val_dataset_t = Data.TensorDataset(data_t_val, label_t_val)\n test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze()\n )\n test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze()\n )\n criterion = nn.NLLLoss()\n train(train_dataset_s, val_dataset_s, val_dataset_t, train_dataset_t)\n s_test_acc = test(test_dataset_s)\n t_test_acc = test(test_dataset_t)\n print('\\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(\n s_test_acc, t_test_acc))\n wandb.finish()\n", "step-5": "# -*- coding: utf-8 -*-\r\n# @Time : 2022-03-09 21:51\r\n# @Author : 袁肖瀚\r\n# @FileName: WDCNN-DANN.py\r\n# @Software: PyCharm\r\nimport torch\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport argparse\r\nfrom model import WDCNN1\r\nfrom torch.nn.init import xavier_uniform_\r\nimport torch.utils.data as Data\r\nimport matplotlib.pylab as plt\r\nimport wandb\r\nimport os\r\nfrom matplotlib.ticker import FuncFormatter\r\n\r\n#定义wandb参数\r\nhyperparameter_defaults = dict(\r\n epochs=70,\r\n batch_train=40,\r\n batch_val=50,\r\n batch_test=40,\r\n lr=0.0002,\r\n weight_decay=0.0005,\r\n r=0.02\r\n)\r\n\r\nwandb.init(config=hyperparameter_defaults, project=\"WDCNN-DANN\")\r\nconfig = wandb.config\r\n\r\n\r\nplt.rcParams['font.family'] = ['Times New Roman']\r\n\r\ndef to_percent(temp, position):\r\n return '%1.0f' % (temp) + '%'\r\n\r\n# model initialization 参数初始化\r\ndef weight_init(m):\r\n class_name = m.__class__.__name__ #得到网络层的名字\r\n if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1\r\n xavier_uniform_(m.weight.data)\r\n if class_name.find('Linear') != -1:\r\n xavier_uniform_(m.weight.data)\r\n\r\ndef batch_norm_init(m):\r\n\r\n class_name = 
m.__class__.__name__\r\n if class_name.find('BatchNorm') != -1:\r\n m.reset_running_stats()\r\n\r\n\r\n# split train and split data\r\ndef data_split_train(data_set, label_set):\r\n data_set_train = []\r\n data_set_val = []\r\n label_set_train = []\r\n label_set_val = []\r\n\r\n for i in range(data_set.shape[0]): #行数 shape[2]通道数\r\n index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']\r\n np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习\r\n a = index[:int((data_set.shape[1]) * 0.8)]\r\n data = data_set[i] #第i行\r\n\r\n data_train = data[a]\r\n data_val = np.delete(data, a, 0)\r\n data_set_train.append(data_train)\r\n data_set_val.append(data_val)\r\n label_set_train.extend(label_set[i][:len(data_train)])\r\n label_set_val.extend(label_set[i][:len(data_val)])\r\n data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])\r\n data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])\r\n label_set_train = np.array(label_set_train)\r\n label_set_val = np.array(label_set_val)\r\n\r\n return data_set_train, data_set_val, label_set_train, label_set_val\r\n\r\n\r\n# training process\r\ndef train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):\r\n global alpha\r\n #torch.cuda.empty_cache()\r\n\r\n length = len(train_dataset.tensors[0])\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)\r\n train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)\r\n\r\n val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)\r\n val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)\r\n\r\n t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True) # 修改这里,保证两个训练集的迭代次数一致\r\n # t_loader_iter = iter(t_loader)\r\n\r\n val_loss_s = []\r\n val_loss_t = []\r\n val_acc_s = []\r\n val_acc_t = []\r\n cross_loss = [] #暂时不知道作用\r\n Source_Train_Acc=[]\r\n\r\n for epoch in range(config.epochs):\r\n # t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True) # 修改这里,保证两个训练集的迭代次数一致\r\n t_loader_iter = iter(t_loader)\r\n\r\n model.train()\r\n for index, (s_data_train, s_label_train) in enumerate(train_dataloader):\r\n p = float(index) / 20\r\n alpha = 2. / (1. + np.exp(-10 * p)) - 1\r\n t_data_train = t_loader_iter.next()\r\n s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)\r\n t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)\r\n s_label_train = s_label_train.long().to(device)\r\n\r\n s_domain_label = torch.zeros(config.batch_train).long().cuda()\r\n t_domain_label = torch.ones(config.batch_train).long().cuda()\r\n\r\n s_out_train, s_domain_out = model(s_data_train, alpha)\r\n t_out_train, t_domain_out = model(t_data_train, alpha)\r\n\r\n\r\n loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失\r\n loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失\r\n\r\n loss_c = criterion(s_out_train, s_label_train) #分类器损失\r\n loss = loss_c + (loss_domain_s + loss_domain_t)*0.02\r\n\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n pred_s = torch.argmax(s_out_train.data, 1) # 返回指定维度最大值的序号 dim=1\r\n correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率\r\n acc = 100. 
* correct_s.item() / len(s_data_train)\r\n Source_Train_Acc.append(acc)\r\n wandb.log({\"Source Train Acc\": acc})\r\n\r\n if index % 2 == 0:\r\n print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \\t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format\r\n (epoch, config.epochs, (index + 1) * len(s_data_train), length,\r\n 100. * (config.batch_train * (index + 1) / length), loss_c.item(),\r\n loss_domain_s.item() + loss_domain_t.item()\r\n , acc))\r\n\r\n #validation\r\n model.eval()\r\n #源域验证\r\n correct_val_s = 0\r\n sum_loss_s = 0\r\n length_val_s = len(val_dataset_s)\r\n for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):\r\n with torch.no_grad():\r\n s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)\r\n s_label_val = s_label_val.long().to(device)\r\n\r\n output_val_s, _ = model(s_data_val, alpha)\r\n loss_s = criterion(output_val_s, s_label_val)\r\n\r\n pred_val_s = torch.argmax(output_val_s.data, 1)\r\n correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()\r\n sum_loss_s += loss_s\r\n acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率\r\n average_loss_s = sum_loss_s.item() / length_val_s #源域损失\r\n\r\n #目标域验证\r\n correct_val_t = 0\r\n sum_loss_t = 0\r\n length_val_t = len(val_dataset_t)\r\n for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):\r\n with torch.no_grad():\r\n t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)\r\n t_label_val = t_label_val.long().to(device)\r\n\r\n output_val_t, _ = model(t_data_val, alpha)\r\n loss_t = criterion(output_val_t, t_label_val)\r\n\r\n pred_val_t = torch.argmax(output_val_t.data, 1)\r\n correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()\r\n sum_loss_t += loss_t\r\n acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率\r\n average_loss_t = sum_loss_t.item() / length_val_t #目标域损失\r\n\r\n metrics = {\"Acc_val_t\": acc_t, 'epoch':epoch}\r\n wandb.log(metrics)\r\n\r\n\r\n print('\\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(\r\n epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))\r\n\r\n val_loss_s.append(loss_s.item())\r\n val_loss_t.append(loss_t.item())\r\n val_acc_t.append(acc_t)\r\n val_acc_s.append(acc_s)\r\n\r\n torch.save(model.state_dict(), os.path.join(wandb.run.dir, \"model.pth\"))\r\n\r\n #画出验证集正确率曲线\r\n plt.plot(val_acc_s, 'r-',marker='s')\r\n plt.plot(val_acc_t, 'g-',marker='*')\r\n plt.legend([\"Source domain validation accuracy\", \"Target domain validation accuracy\"])\r\n plt.xlabel('Epochs')\r\n plt.ylabel('validation accuracy')\r\n plt.title('Source doamin & Target domain Validation Accuracy Rate')\r\n plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))\r\n plt.savefig(\"Source doamin & Target domain Validation Accuracy Rate.png\")\r\n plt.show()\r\n\r\n #画出验证集损失\r\n plt.plot(val_loss_s, 'r-',marker='o')\r\n plt.plot(val_loss_t, 'g-',marker='x')\r\n plt.legend([\"Source domain validation Loss\", \"Target domain validation Loss\"])\r\n plt.xlabel('Epochs')\r\n plt.ylabel('val_loss')\r\n plt.title('Source domain & Target domain Validation Loss')\r\n plt.savefig(\"Source domain & Target domain Validation Loss\")\r\n plt.show()\r\n\r\n\r\n# testing\r\ndef test(test_dataset):\r\n model.eval()\r\n length = len(test_dataset)\r\n correct = 0\r\n test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)\r\n\r\n y_test = []\r\n y_pred = []\r\n\r\n for index, (data, label) in enumerate(test_loader):\r\n with torch.no_grad():\r\n 
data = data.float().to(device)\r\n label = label.long().to(device)\r\n y_test.append(label)\r\n\r\n output, _ = model(data.unsqueeze(dim=1), alpha)\r\n pred = torch.argmax(output.data, 1)\r\n y_pred.append(pred)\r\n correct += pred.eq(label).cpu().sum()\r\n\r\n acc = 100. * correct / length\r\n return acc\r\n\r\n\r\nif __name__ == '__main__':\r\n torch.cuda.empty_cache()\r\n # use cpu or gpu\r\n if torch.cuda.is_available():\r\n device = 'cuda'\r\n else:\r\n device = 'cpu'\r\n device = torch.device(device)\r\n\r\n # CWRU\r\n dataset_s_train = np.load(r'bearing numpy data\\dataset_train_0HP_100.npz')\r\n dataset_s_test = np.load(r'bearing numpy data\\dataset_val_0HP_80.npz')\r\n dataset_t_train = np.load(r'bearing numpy data\\dataset_train_3HP_100.npz')\r\n dataset_t_test = np.load(r'bearing numpy data\\dataset_val_3HP_80.npz')\r\n\r\n data_s_train_val = dataset_s_train['data']\r\n data_s_test = dataset_s_test['data'].reshape(-1, 1024)\r\n data_t_train_val = dataset_t_train['data']\r\n data_t_test = dataset_t_test['data'].reshape(-1, 1024)\r\n label_s_train_val = dataset_s_train['label']\r\n label_s_test = dataset_s_test['label'].reshape(1, -1)\r\n label_t_train_val = dataset_t_train['label']\r\n label_t_test = dataset_t_test['label'].reshape(1, -1)\r\n\r\n iteration_acc = []\r\n\r\n test_acc_s = []\r\n\r\n\r\n # repeat several times for an average result\r\n for iteration in range(1):\r\n # load model\r\n model = WDCNN1(C_in=1, class_num=10).to(device)\r\n model.apply(weight_init)\r\n model.apply(batch_norm_init)\r\n\r\n # train/val\r\n data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)\r\n data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)\r\n\r\n # transfer ndarray to tensor\r\n data_s_train = torch.from_numpy(data_s_train)\r\n data_s_val = torch.from_numpy(data_s_val)\r\n data_t_val = torch.from_numpy(data_t_val) #加的验证\r\n data_s_test = torch.from_numpy(data_s_test)\r\n\r\n data_t_train = torch.from_numpy(data_t_train)\r\n data_t_test = torch.from_numpy(data_t_test)\r\n\r\n label_s_train = torch.from_numpy(label_s_train)\r\n label_s_val = torch.from_numpy(label_s_val)\r\n label_t_val = torch.from_numpy(label_t_val) #加的验证\r\n label_s_test = torch.from_numpy(label_s_test)\r\n #label_t_train = torch.from_numpy(label_t_train)\r\n label_t_test = torch.from_numpy(label_t_test)\r\n\r\n # seal to data-set\r\n train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)\r\n train_dataset_t = Data.TensorDataset(data_t_train)\r\n val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)\r\n val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证\r\n test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())\r\n test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())\r\n\r\n # print(train_dataset_s, val_dataset_s)\r\n criterion = nn.NLLLoss()\r\n\r\n train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)\r\n s_test_acc = test(test_dataset_s)\r\n t_test_acc = test(test_dataset_t)\r\n print('\\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))\r\n\r\n wandb.finish()\r\n\r\n\r\n", "step-ids": [ 3, 5, 7, 9, 10 ] }
[ 3, 5, 7, 9, 10 ]
# maze = [0, 3, 0, 1, -3]
with open('./day_5/input.txt') as f:
    maze = f.readlines()
f.close
maze = [int(line.strip()) for line in maze]

# I think I will just expand on the original functions
# from now on rather than separating part one from two
def escape_maze(maze):
    end = len(maze) - 1
    step_counter = 0
    offset = 0

    while True:
        cur_index = offset
        offset = offset + maze[cur_index]
        if maze[cur_index] >= 3:
            maze[cur_index] = maze[cur_index] - 1
        else:
            maze[cur_index] = maze[cur_index] + 1
        step_counter += 1
        if offset > end:
            return step_counter


print(escape_maze(maze))
normal
{ "blob_id": "a4dfac7e15064d92c806a4e3f972f06e4dca6b11", "index": 5181, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\n<mask token>\n", "step-3": "with open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\n<mask token>\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))\n", "step-4": "with open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\nmaze = [int(line.strip()) for line in maze]\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))\n", "step-5": "# maze = [0, 3, 0, 1, -3]\nwith open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\nmaze = [int(line.strip()) for line in maze]\n\n# I think I will just expand on the original functions\n# from now on rather than separating part one from two\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import numpy as np
from scipy import stats
from scipy import interpolate
from math import factorial
from scipy import signal

"""
A continuous wavelet transform based peak finder. Tested exclusively on Raman
spectra, however, it should work for most datasets.

Parameters
----------
lowerBound: The lowest value of the scale factor to use in the wavelet transform
upperBound: The highest value of the scale factor to use in the wavelet transform
steps: The number of scale factors we want between the highest and lowest bounds

rowWindow: The maximum number of rows that a ridge line can be discontinuous before
it is terminated. I.e. the maximum number of scale factors it can deviate.
colWindow: The maximum number of columns that a ridge line can wander before it is
terminated. I.e. the maximum number of wavenumbers (or a similar X value) that the
ridge line can deviate.
"""

# CWT Transform parameters
lowerBound = 1
upperBound = 70
steps = 90

# Ridge line filtering parameters
rowWindow = 2
columnWindow = 5


class _spectra:

    def __init__(self,x,y):
        self.x = x
        self.y = y

    def x(self):
        return waveNumbers

    def y(self):
        return intensities

"""
Simple helper function for finding all of the maxima in the 2D array returned by
the wavelet transform. Works on the basis of a simple comparison between
neighbouring elements. These values form the initial basis for the ridge lines.
"""
def _findMaxima1D(CWTArray):

    maximas = np.zeros(CWTArray.size,dtype=(float,3))

    # Populate the maxima array with a tuple of the coordinates and the values of the maxima
    count = 0
    for j,row in enumerate(CWTArray):
        for i,element in enumerate(row):
            try:
                if element > row[i-1] and element > row[i+1]:
                    maximas[count]= ((steps-j,i,element))
                    count += 1
            except IndexError:
                pass

    return np.vstack(maximas[:count])

"""
Filter the ridge lines found from the maxima of the CWT coefficient array based on
a set parameters, namely the maximum deviations in wavenumber and scale space. Any
lines which are found from this criteria are considered to be peaks and further
evaluated in the following steps.
"""
def _filterRidgeLines(maximaArray,rowMax,colMax):

    # Helper to prevent duplicating ridge lines
    def checkValues(value, ridgeLines):
        for lines in ridgeLines:
            for points in lines:
                if value in points:
                    return True
        return False

    ridgeLines = []

    # Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)
    for i,row in enumerate(maximaArray):
        ridge = [] # For each maxima start a ridge line
        colPos = row[1] # Get the column position of the current maxima
        rowPos = row[0] # Get the row position of the current maxima
        # If this value is already part of another ridge line, move to the next value
        if checkValues(colPos, ridgeLines):
            continue
        for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima
            if nextRows[0] == rowPos: # If the scale factors are the same, skip
                continue
            if np.abs(colPos - nextRows[1]) <= colMax and \
               np.abs(rowPos - nextRows[0]) <= rowMax:
                ridge.append((rowPos,colPos,nextRows[2]))
                rowPos = nextRows[0]
                colPos = nextRows[1]
        # If the ridge lines run all the way to the lowest scale factors, add them to the list
        if len(ridge) != 0:
            if ridge[-1][0] <= 2:
                ridgeLines.append(ridge)

    return ridgeLines

"""
For each of the ridge lines found from the filtered CWT array, determine the other
characteristics of the peaks. The position of the peak is determined from the
position of the maxima in the ridge line.
"""
def getPeakInfo(ridgeLines,data,waveletCoeff):

    # For each of the ridge lines we have found, locate the positions of the maxima. These
    # correspond to the peak centers.
    peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\
                        ('cwtCoeff','f'),('SNR','f'),('length','uint8'),\
                        ('intensity','f'),('wavenumber','f')])

    # For each of the ridge lines, add the position of the peak center and the length of the
    # line. These are useful for filtering peaks later.
    for i,lines in enumerate(ridgeLines):
        # Find the index of the maximum CWT coefficient. This is the peak center.
        maximum = np.argmax(zip(*lines)[2])
        peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\
                      data.x[lines[maximum][1]],data.y[lines[maximum][1]]

    # Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is
    # defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.
    for i, peaks in enumerate(peakInfo):
        SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])
        if len(SNR) == 0:
            peakInfo['SNR'][i] = 0
        else:
            SNR = stats.scoreatpercentile(SNR, 95)
            peakInfo['SNR'][i] = SNR

    return peakInfo

"""
Processes spectral data and returns a structured array of peak information. Peak can
then be filtered based on ridge line length, signal to noise ratio and scale values.
"""
def getPeaks(waveNumbers,intensities):

    data = _spectra(waveNumbers,intensities)

    # Take the CWT of the spectra. Trim the result to remove padding.
    waveletCoeff = signal.cwt(intensities, signal.ricker, \
                   np.linspace(lowerBound,upperBound,steps))

    # Flip the matrix so the highest wavelet coefficient is the top row
    waveletCoeff = np.flipud(waveletCoeff)

    # Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
    # takes a (scaleFactor,3) array of positions and values of maxima.
    ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)

    # Populate a structured array with peak information
    peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)

    return peakInfo
normal
{ "blob_id": "8f5d9918260e2f50fb229a7067f820a186101b99", "index": 1080, "step-1": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n <mask token>\n\n def y(self):\n return intensities\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n", "step-3": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n", "step-4": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = 
np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n", "step-5": "import numpy as np\nfrom scipy import stats\nfrom scipy import interpolate\nfrom math import factorial\nfrom scipy import signal\n\n\"\"\"\n\nA continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,\nit should work for most datasets.\n\nParameters\n----------\n\nlowerBound: The lowest value of the scale factor to use in the wavelet transform\nupperBound: The highest value of the scale factor to use in the wavelet transform\nsteps: The number of scale factors we want between the highest and lowest bounds\n\nrowWindow: The maximum number of rows that a ridge line can be discontinuous before it is\nterminated. I.e. the maximum number of scale factors it can deviate.\n\ncolWindow: The maximum number of columns that a ridge line can wander before it is terminated.\nI.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.\n\n\"\"\"\n\n# CWT Transform parameters\nlowerBound = 1\nupperBound = 70\nsteps = 90\n\n# Ridge line filtering parameters\nrowWindow = 2\ncolumnWindow = 5\n\nclass _spectra:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\"\"\"\n\nSimple helper function for finding all of the maxima in the 2D array returned by the wavelet\ntransform. Works on the basis of a simple comparison between neighbouring elements. These\nvalues form the initial basis for the ridge lines.\n\n\"\"\"\ndef _findMaxima1D(CWTArray):\n\n maximas = np.zeros(CWTArray.size,dtype=(float,3))\n\n # Populate the maxima array with a tuple of the coordinates and the values of the maxima\n count = 0\n for j,row in enumerate(CWTArray):\n for i,element in enumerate(row):\n try:\n if element > row[i-1] and element > row[i+1]:\n maximas[count]= ((steps-j,i,element))\n count += 1\n except IndexError:\n pass\n\n return np.vstack(maximas[:count])\n\n\"\"\"\n\nFilter the ridge lines found from the maxima of the CWT coefficient array based on a set\nparameters, namely the maximum deviations in wavenumber and scale space. 
Any lines which are\nfound from this criteria are considered to be peaks and further evaluated in the following\nsteps.\n\n\"\"\"\ndef _filterRidgeLines(maximaArray,rowMax,colMax):\n\n # Helper to prevent duplicating ridge lines\n def checkValues(value, ridgeLines):\n\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n\n ridgeLines = []\n\n # Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)\n for i,row in enumerate(maximaArray):\n ridge = [] # For each maxima start a ridge line\n colPos = row[1] # Get the column position of the current maxima\n rowPos = row[0] # Get the row position of the current maxima\n # If this value is already part of another ridge line, move to the next value\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima\n if nextRows[0] == rowPos: # If the scale factors are the same, skip\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and \\\n np.abs(rowPos - nextRows[0]) <= rowMax:\n ridge.append((rowPos,colPos,nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n\n # If the ridge lines run all the way to the lowest scale factors, add them to the list\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n\n return ridgeLines\n\n\"\"\"\n\nFor each of the ridge lines found from the filtered CWT array, determine the other\ncharacteristics of the peaks.\n\nThe position of the peak is determined from the position of the maxima in the ridge\nline.\n\n\"\"\"\ndef getPeakInfo(ridgeLines,data,waveletCoeff):\n\n # For each of the ridge lines we have found, locate the positions of the maxima. These\n # correspond to the peak centers.\n peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\\\n ('cwtCoeff','f'),('SNR','f'),('length','uint8'),\\\n ('intensity','f'),('wavenumber','f')])\n\n # For each of the ridge lines, add the position of the peak center and the length of the\n # line. These are useful for filtering peaks later.\n for i,lines in enumerate(ridgeLines):\n # Find the index of the maximum CWT coefficient. This is the peak center.\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\\\n data.x[lines[maximum][1]],data.y[lines[maximum][1]]\n\n # Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is\n # defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n\n return peakInfo\n\n\"\"\"\n\nProcesses spectral data and returns a structured array of peak information. Peak can then be\nfiltered based on ridge line length, signal to noise ratio and scale values.\n\n\"\"\"\ndef getPeaks(waveNumbers,intensities):\n\n data = _spectra(waveNumbers,intensities)\n\n # Take the CWT of the spectra. Trim the result to remove padding.\n waveletCoeff = signal.cwt(intensities, signal.ricker, \\\n np.linspace(lowerBound,upperBound,steps))\n\n # Flip the matrix so the highest wavelet coefficient is the top row\n waveletCoeff = np.flipud(waveletCoeff)\n\n # Find the ridge lines connecting the maxima in the wavelet coefficient array. 
Filter ridge lines\n # takes a (scaleFactor,3) array of positions and values of maxima.\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)\n\n # Populate a structured array with peak information\n peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)\n\n return peakInfo\n", "step-ids": [ 3, 5, 6, 8, 11 ] }
[ 3, 5, 6, 8, 11 ]
from math import ceil

n, k = map(int, input().split())
d = list(map(int, input().split()))

packs = [0]*k
for i in d:
    packs[i%k] += 1

counter = packs[0]//2
if (k % 2) == 0:
    counter += packs[k//2]//2
for i in range(1, ceil(k/2)):
    counter += min(packs[i], packs[k-i])

print(counter*2)
normal
{ "blob_id": "2226382c494af33957a44d9f1682f7deacf574a2", "index": 2075, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in d:\n packs[i % k] += 1\n<mask token>\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n", "step-3": "<mask token>\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n", "step-4": "from math import ceil\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n", "step-5": "from math import ceil\n\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\n\npacks = [0]*k\nfor i in d:\n packs[i%k] += 1\n\ncounter = packs[0]//2\nif (k % 2) == 0:\n counter += packs[k//2]//2\nfor i in range(1, ceil(k/2)):\n counter += min(packs[i], packs[k-i])\n\nprint(counter*2)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Authors: Robert Luke <[email protected]>
#
# License: BSD (3-clause)

import numpy as np
from mne.io.pick import _picks_to_idx


def run_GLM(raw, design_matrix, noise_model='ar1', bins=100,
            n_jobs=1, verbose=0):
    """
    Run GLM on data using supplied design matrix.

    This is a wrapper function for nilearn.stats.first_level_model.run_glm.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : as specified in Nilearn
        The design matrix.
    noise_model : {'ar1', 'ols'}, optional
        The temporal variance model. Defaults to 'ar1'.
    bins : : int, optional
        Maximum number of discrete bins for the AR(1) coef histogram.
    n_jobs : int, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : int, optional
        The verbosity level. Defaut is 0

    Returns
    -------
    glm_estimates : dict
        Keys correspond to the different labels values values are
        RegressionResults instances corresponding to the voxels.
    """
    from nilearn.glm.first_level import run_glm

    picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    ch_names = raw.ch_names

    results = dict()
    for pick in picks:
        labels, glm_estimates = run_glm(raw.get_data(pick).T,
                                        design_matrix.values,
                                        noise_model=noise_model, bins=bins,
                                        n_jobs=n_jobs, verbose=verbose)
        results[ch_names[pick]] = glm_estimates[labels[0]]

    return results


def compute_contrast(glm_est, contrast, contrast_type=None):
    """
    Compute contrasts on regression results.

    This is a wrapper function for nilearn.stats.contrasts.

    Parameters
    ----------
    glm_estimates : dict
        Dictionary of nilearn regression results as returned by `run_glm`.
    contrast : numpy.ndarray of shape (p) or (q, p),
        Where q = number of contrast vectors and p = number of regressors.
    contrast_type : {None, ‘t’, ‘F’}, optional
        Type of the contrast. If None, then defaults to ‘t’ for 1D con_val
        and ‘F’ for 2D con_val.

    Returns
    -------
    contrast : Contrast instance,
        Yields the statistics of the contrast (effects, variance, p-values).
    """

    from nilearn.glm.contrasts import compute_contrast as _cc
    return _cc(np.array(list(glm_est.keys())), glm_est, contrast,
               contrast_type=contrast_type)
normal
{ "blob_id": "8279c6d5f33d5580bef20e497e2948461a1de62c", "index": 7951, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef run_GLM(raw, design_matrix, noise_model='ar1', bins=100, n_jobs=1,\n verbose=0):\n \"\"\"\n Run GLM on data using supplied design matrix.\n\n This is a wrapper function for nilearn.stats.first_level_model.run_glm.\n\n Parameters\n ----------\n raw : instance of Raw\n The haemoglobin data.\n design_matrix : as specified in Nilearn\n The design matrix.\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Defaults to 'ar1'.\n bins : : int, optional\n Maximum number of discrete bins for the AR(1) coef histogram.\n n_jobs : int, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'.\n verbose : int, optional\n The verbosity level. Defaut is 0\n\n Returns\n -------\n glm_estimates : dict\n Keys correspond to the different labels values values are\n RegressionResults instances corresponding to the voxels.\n \"\"\"\n from nilearn.glm.first_level import run_glm\n picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)\n ch_names = raw.ch_names\n results = dict()\n for pick in picks:\n labels, glm_estimates = run_glm(raw.get_data(pick).T, design_matrix\n .values, noise_model=noise_model, bins=bins, n_jobs=n_jobs,\n verbose=verbose)\n results[ch_names[pick]] = glm_estimates[labels[0]]\n return results\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef run_GLM(raw, design_matrix, noise_model='ar1', bins=100, n_jobs=1,\n verbose=0):\n \"\"\"\n Run GLM on data using supplied design matrix.\n\n This is a wrapper function for nilearn.stats.first_level_model.run_glm.\n\n Parameters\n ----------\n raw : instance of Raw\n The haemoglobin data.\n design_matrix : as specified in Nilearn\n The design matrix.\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Defaults to 'ar1'.\n bins : : int, optional\n Maximum number of discrete bins for the AR(1) coef histogram.\n n_jobs : int, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'.\n verbose : int, optional\n The verbosity level. Defaut is 0\n\n Returns\n -------\n glm_estimates : dict\n Keys correspond to the different labels values values are\n RegressionResults instances corresponding to the voxels.\n \"\"\"\n from nilearn.glm.first_level import run_glm\n picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)\n ch_names = raw.ch_names\n results = dict()\n for pick in picks:\n labels, glm_estimates = run_glm(raw.get_data(pick).T, design_matrix\n .values, noise_model=noise_model, bins=bins, n_jobs=n_jobs,\n verbose=verbose)\n results[ch_names[pick]] = glm_estimates[labels[0]]\n return results\n\n\ndef compute_contrast(glm_est, contrast, contrast_type=None):\n \"\"\"\n Compute contrasts on regression results.\n\n This is a wrapper function for nilearn.stats.contrasts.\n\n Parameters\n ----------\n glm_estimates : dict\n Dictionary of nilearn regression results as returned by `run_glm`.\n contrast : numpy.ndarray of shape (p) or (q, p),\n Where q = number of contrast vectors and p = number of regressors.\n contrast_type : {None, ‘t’, ‘F’}, optional\n Type of the contrast. 
If None, then defaults to ‘t’ for 1D con_val\n and ‘F’ for 2D con_val.\n\n Returns\n -------\n contrast : Contrast instance,\n Yields the statistics of the contrast (effects, variance, p-values).\n \"\"\"\n from nilearn.glm.contrasts import compute_contrast as _cc\n return _cc(np.array(list(glm_est.keys())), glm_est, contrast,\n contrast_type=contrast_type)\n", "step-4": "import numpy as np\nfrom mne.io.pick import _picks_to_idx\n\n\ndef run_GLM(raw, design_matrix, noise_model='ar1', bins=100, n_jobs=1,\n verbose=0):\n \"\"\"\n Run GLM on data using supplied design matrix.\n\n This is a wrapper function for nilearn.stats.first_level_model.run_glm.\n\n Parameters\n ----------\n raw : instance of Raw\n The haemoglobin data.\n design_matrix : as specified in Nilearn\n The design matrix.\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Defaults to 'ar1'.\n bins : : int, optional\n Maximum number of discrete bins for the AR(1) coef histogram.\n n_jobs : int, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'.\n verbose : int, optional\n The verbosity level. Defaut is 0\n\n Returns\n -------\n glm_estimates : dict\n Keys correspond to the different labels values values are\n RegressionResults instances corresponding to the voxels.\n \"\"\"\n from nilearn.glm.first_level import run_glm\n picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)\n ch_names = raw.ch_names\n results = dict()\n for pick in picks:\n labels, glm_estimates = run_glm(raw.get_data(pick).T, design_matrix\n .values, noise_model=noise_model, bins=bins, n_jobs=n_jobs,\n verbose=verbose)\n results[ch_names[pick]] = glm_estimates[labels[0]]\n return results\n\n\ndef compute_contrast(glm_est, contrast, contrast_type=None):\n \"\"\"\n Compute contrasts on regression results.\n\n This is a wrapper function for nilearn.stats.contrasts.\n\n Parameters\n ----------\n glm_estimates : dict\n Dictionary of nilearn regression results as returned by `run_glm`.\n contrast : numpy.ndarray of shape (p) or (q, p),\n Where q = number of contrast vectors and p = number of regressors.\n contrast_type : {None, ‘t’, ‘F’}, optional\n Type of the contrast. If None, then defaults to ‘t’ for 1D con_val\n and ‘F’ for 2D con_val.\n\n Returns\n -------\n contrast : Contrast instance,\n Yields the statistics of the contrast (effects, variance, p-values).\n \"\"\"\n from nilearn.glm.contrasts import compute_contrast as _cc\n return _cc(np.array(list(glm_est.keys())), glm_est, contrast,\n contrast_type=contrast_type)\n", "step-5": "# Authors: Robert Luke <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nfrom mne.io.pick import _picks_to_idx\n\n\ndef run_GLM(raw, design_matrix, noise_model='ar1', bins=100,\n n_jobs=1, verbose=0):\n \"\"\"\n Run GLM on data using supplied design matrix.\n\n This is a wrapper function for nilearn.stats.first_level_model.run_glm.\n\n Parameters\n ----------\n raw : instance of Raw\n The haemoglobin data.\n design_matrix : as specified in Nilearn\n The design matrix.\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Defaults to 'ar1'.\n bins : : int, optional\n Maximum number of discrete bins for the AR(1) coef histogram.\n n_jobs : int, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'.\n verbose : int, optional\n The verbosity level. 
Defaut is 0\n\n Returns\n -------\n glm_estimates : dict\n Keys correspond to the different labels values values are\n RegressionResults instances corresponding to the voxels.\n \"\"\"\n from nilearn.glm.first_level import run_glm\n\n picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)\n ch_names = raw.ch_names\n\n results = dict()\n for pick in picks:\n labels, glm_estimates = run_glm(raw.get_data(pick).T,\n design_matrix.values,\n noise_model=noise_model, bins=bins,\n n_jobs=n_jobs, verbose=verbose)\n results[ch_names[pick]] = glm_estimates[labels[0]]\n\n return results\n\n\ndef compute_contrast(glm_est, contrast, contrast_type=None):\n \"\"\"\n Compute contrasts on regression results.\n\n This is a wrapper function for nilearn.stats.contrasts.\n\n Parameters\n ----------\n glm_estimates : dict\n Dictionary of nilearn regression results as returned by `run_glm`.\n contrast : numpy.ndarray of shape (p) or (q, p),\n Where q = number of contrast vectors and p = number of regressors.\n contrast_type : {None, ‘t’, ‘F’}, optional\n Type of the contrast. If None, then defaults to ‘t’ for 1D con_val\n and ‘F’ for 2D con_val.\n\n Returns\n -------\n contrast : Contrast instance,\n Yields the statistics of the contrast (effects, variance, p-values).\n \"\"\"\n\n from nilearn.glm.contrasts import compute_contrast as _cc\n return _cc(np.array(list(glm_est.keys())), glm_est, contrast,\n contrast_type=contrast_type)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys
import os

from django.conf import settings

BASE_DIR=os.path.dirname(__file__)

settings.configure(
	DEBUG=True,
	SECRET_KEY='ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_',
	ROOT_URLCONF='sitebuilder.urls',
	MIDDLEWARE_CLASSES=(),
	INSTALLED_APPS=(
		'django.contrib.staticfiles',
		'django.contrib.webdesign',
		'sitebuilder',
		'compressor',

		),
	STATIC_URL='/static/',
	SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR,'pages'),
	SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,'_build'),
	STATIC_ROOT=os.path.join(BASE_DIR,'_build','static'),
	#STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
	STATICFILES_FINDERS=(
		'django.contrib.staticfiles.finders.FileSystemFinder',
		'django.contrib.staticfiles.finders.AppDirectoriesFinder',
		'compressor.finders.CompressorFinder',

		)

)

if __name__ =="__main__":
	from django.core.management import execute_from_command_line

	execute_from_command_line(sys.argv)
normal
{ "blob_id": "d30e5e24dd06a4846fdde3c9fcac0a5dac55ad0d", "index": 5916, "step-1": "<mask token>\n", "step-2": "<mask token>\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n", "step-3": "<mask token>\nBASE_DIR = os.path.dirname(__file__)\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n", "step-4": "import sys\nimport os\nfrom django.conf import settings\nBASE_DIR = os.path.dirname(__file__)\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n", "step-5": "import sys\nimport os\n\nfrom django.conf import settings\n\nBASE_DIR=os.path.dirname(__file__)\n\nsettings.configure(\n\tDEBUG=True,\n\tSECRET_KEY='ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_',\n\tROOT_URLCONF='sitebuilder.urls',\n\tMIDDLEWARE_CLASSES=(),\n\tINSTALLED_APPS=(\n\t\t'django.contrib.staticfiles',\n\t\t'django.contrib.webdesign',\n\t\t'sitebuilder',\n\t\t'compressor',\n\n\t\t),\n\tSTATIC_URL='/static/',\n\tSITE_PAGES_DIRECTORY=os.path.join(BASE_DIR,'pages'),\n\tSITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,'_build'),\n\tSTATIC_ROOT=os.path.join(BASE_DIR,'_build','static'),\n\t#STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',\n\tSTATICFILES_FINDERS=(\n\t\t'django.contrib.staticfiles.finders.FileSystemFinder',\n\t\t'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\t\t'compressor.finders.CompressorFinder',\n\n\t\t)\n\n)\n\nif __name__ 
==\"__main__\":\n\tfrom django.core.management import execute_from_command_line\n\n\texecute_from_command_line(sys.argv)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# --- Do not remove these libs ---
from freqtrade.strategy.interface import IStrategy
from typing import Dict, List
from functools import reduce
from pandas import DataFrame
# --------------------------------

import datetime
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy as np# noqa


class ema(IStrategy):

    max_open_trades = 10
    stake_amount = 50
    # Minimal ROI designed for the strategy.
    # This attribute will be overridden if the config file contains "minimal_roi"

    # Optimal stoploss designed for the strategy
    # This attribute will be overridden if the config file contains "stoploss"
    stoploss = -1

    minimal_roi = {
        "0": 10
    }

    # Optimal timeframe for the strategy
    timeframe = '5m'

    # trailing stoploss
    trailing_stop = False
    trailing_stop_positive = 0.1
    trailing_stop_positive_offset = 0.2

    # run "populate_indicators" only for new candle
    process_only_new_candles = False

    # Experimental settings (configuration will overide these if set)
    use_sell_signal = True
    sell_profit_only = False
    ignore_roi_if_buy_signal = False

    # Optional order type mapping
    order_types = {
        'buy': 'limit',
        'sell': 'limit',
        'stoploss': 'market',
        'stoploss_on_exchange': False
    }

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Adds several different TA indicators to the given DataFrame
        Performance Note: For the best performance be frugal on the number of indicators
        you are using. Let uncomment only the indicator you are using in your strategies
        or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
        """

        dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)
        dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)

        dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)
        dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)

        dataframe['ema'] =dataframe['ema6']-dataframe['ema24']
        dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']

        dataframe['ema']= dataframe['ema']*0.6 + dataframe['ema2']*0.5
        dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)

        return dataframe

    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Based on TA indicators, populates the buy signal for the given dataframe
        :param dataframe: DataFrame
        :return: DataFrame with buy column
        """

        dataframe.loc[
            (
                (qtpylib.crossed_above(dataframe['ema'],dataframe['ema2']))
            ),'buy'] = 1

        return dataframe

    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Based on TA indicators, populates the sell signal for the given dataframe
        :param dataframe: DataFrame
        :return: DataFrame with buy column
        """
        dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])),'sell'] = 1

        return dataframe
normal
{ "blob_id": "7b047ba110732d1b0a749bcbbaa9b55306ca2071", "index": 6434, "step-1": "<mask token>\n\n\nclass ema(IStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n", "step-2": "<mask token>\n\n\nclass ema(IStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n", "step-3": "<mask token>\n\n\nclass ema(IStrategy):\n max_open_trades = 10\n stake_amount = 50\n stoploss = -1\n minimal_roi = {'0': 10}\n timeframe = '5m'\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n process_only_new_candles = False\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n order_types = {'buy': 'limit', 'sell': 'limit', 'stoploss': 'market',\n 'stoploss_on_exchange': False}\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. 
Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n", "step-4": "from freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\nimport datetime\nimport talib.abstract as ta\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport numpy as np\n\n\nclass ema(IStrategy):\n max_open_trades = 10\n stake_amount = 50\n stoploss = -1\n minimal_roi = {'0': 10}\n timeframe = '5m'\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n process_only_new_candles = False\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n order_types = {'buy': 'limit', 'sell': 'limit', 'stoploss': 'market',\n 'stoploss_on_exchange': False}\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. 
Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n", "step-5": "# --- Do not remove these libs ---\nfrom freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\n# --------------------------------\n\nimport datetime\nimport talib.abstract as ta\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport numpy as np# noqa\n\n\nclass ema(IStrategy):\n\n max_open_trades = 10\n stake_amount = 50\n # Minimal ROI designed for the strategy.\n # This attribute will be overridden if the config file contains \"minimal_roi\"\n\n # Optimal stoploss designed for the strategy\n # This attribute will be overridden if the config file contains \"stoploss\"\n stoploss = -1\n\n minimal_roi = {\n \"0\": 10\n }\n\n # Optimal timeframe for the strategy\n timeframe = '5m'\n\n # trailing stoploss\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n\n # run \"populate_indicators\" only for new candle\n process_only_new_candles = False\n\n # Experimental settings (configuration will overide these if set)\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n\n # Optional order type mapping\n order_types = {\n 'buy': 'limit',\n 'sell': 'limit',\n 'stoploss': 'market',\n 'stoploss_on_exchange': False\n }\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. 
Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n\n dataframe['ema'] =dataframe['ema6']-dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n\n dataframe['ema']= dataframe['ema']*0.6 + dataframe['ema2']*0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n\n dataframe.loc[\n (\n (qtpylib.crossed_above(dataframe['ema'],dataframe['ema2']))\n ),'buy'] = 1\n\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])),'sell'] = 1\n\n return dataframe", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#from tinyTensor.Node import Node
import tinyTensor
import plotly.plotly as py
from graphviz import render
#from tinyTensor.Operation import Operation


def init():
    global _default_graph
    _default_graph = None

def postOrder(node):
    nodes_postorder = []
    def recurse(node):
        if isinstance(node, tinyTensor.Node.Node):
            for input_node in node.inputNodes:
                recurse(input_node)
        nodes_postorder.append(node)
    recurse(node)
    return nodes_postorder

class Graph():

    def __init__(self):
        self.nodes = []
        self.placeholderNames = []

    def appendNode(self,node):
        if(node.name in self.placeholderNames and node.isPlaceholder):
            raise Exception("Placeholder name \"{}\" is already in use in current graph".format(node.name))
        elif(node.isPlaceholder):
            self.placeholderNames.append(node.name)
        self.nodes.append(node)

    def set_default(self):
        init()
        global _default_graph
        _default_graph = self

    def visualize_nodes(self, node):
        # generating the .gv file
        gv_file = "graph \"\" \n{\n"
        global nodeCounter
        nodeCounter = 0
        def recurse(nodes,gv_file,parent_node_str = None):
            global nodeCounter
            nodes_list = []
            if(isinstance(nodes,list)):
                nodes_list.extend(nodes)
            else:
                nodes_list.append(nodes)
            for node in nodes_list:
                # node should add itself to the list
                current_node_str = "n" + str(nodeCounter)
                nodeCounter += 1
                ''' operation might contain non-node constants, hence need to make sure that they are converted to node'''
                if(type(node) in (int,float)):
                    node = tinyTensor.Node.Node.variable(node) # creating a variable node
                '''creating the node labels'''
                if(isinstance(node,tinyTensor.Operation.Operation)):
                    gv_file += current_node_str + " [label=\"{} ({})\"] ;\n".format(node.operator,node.value)
                elif(node.isPlaceholder):
                    gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
                else:
                    gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
                # now creating connection line to parent(s) TODO: make it possible to have many parents, (nodes should have output nodes list)
                if(parent_node_str != None):
                    gv_file += parent_node_str + " -- " + current_node_str + "; \n"
                # applying the same to the children of this node
                if(len(node.inputNodes) > 0):
                    gv_file = recurse(node.inputNodes,gv_file,current_node_str)
            return gv_file
        gv_file = recurse(node,gv_file)
        gv_file += "}\n"
        with open("network.gv","w+") as file:
            file.writelines(gv_file)
        #render('dot','png','network.gv')
        print(gv_file)

    def visualize_layers(self,layer_list):
        neuron_dict = {}
        #generating dict of neurons
        gv_file = "graph \"\" \n{\n"
        #dealing with input nodes
        for node in layer_list[0].inputList:
            neuron_dict[node] = node.name
            gv_file += neuron_dict[node] + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
        # creating dict for neurons
        for layer in layer_list:
            for neuron in layer.neuronList:
                neuron_dict[neuron] = "{}".format(neuron.name)
                gv_file += neuron_dict[neuron] + " [label=\"{}({})\"] ;\n".format(neuron.name,neuron.value)
        # drawing links between neurons
        for layer in layer_list:
            for neuron in layer.neuronList:
                for input_neuron in neuron.inputNeurons:
                    gv_file += neuron_dict[neuron] + " -- " + neuron_dict[input_neuron] + "; \n"
        gv_file += "}\n"
        with open("network.gv","w+") as file:
            file.writelines(gv_file)
        print(gv_file)
normal
{ "blob_id": "7bd2a29bff1e435cf813dd54109d7f4e17612425", "index": 474, "step-1": "<mask token>\n\n\nclass Graph:\n <mask token>\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n", "step-2": "<mask token>\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n 
nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n", "step-3": "<mask token>\n\n\ndef init():\n global _default_graph\n _default_graph = None\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += 
(current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n", "step-4": "import tinyTensor\nimport plotly.plotly as py\nfrom graphviz import render\n\n\ndef init():\n global _default_graph\n _default_graph = None\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, 
layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n", "step-5": "#from tinyTensor.Node import Node\r\nimport tinyTensor\r\nimport plotly.plotly as py\r\nfrom graphviz import render\r\n#from tinyTensor.Operation import Operation\r\n\r\n\r\ndef init():\r\n global _default_graph\r\n _default_graph = None\r\n\r\ndef postOrder(node):\r\n nodes_postorder = []\r\n def recurse(node):\r\n if isinstance(node, tinyTensor.Node.Node):\r\n for input_node in node.inputNodes:\r\n recurse(input_node)\r\n nodes_postorder.append(node)\r\n recurse(node)\r\n return nodes_postorder\r\n\r\nclass Graph():\r\n\r\n def __init__(self):\r\n self.nodes = []\r\n self.placeholderNames = []\r\n\r\n def appendNode(self,node):\r\n if(node.name in self.placeholderNames and node.isPlaceholder):\r\n raise Exception(\"Placeholder name \\\"{}\\\" is already in use in current graph\".format(node.name))\r\n elif(node.isPlaceholder):\r\n self.placeholderNames.append(node.name)\r\n self.nodes.append(node)\r\n\r\n def set_default(self):\r\n init()\r\n global _default_graph\r\n _default_graph = self\r\n\r\n def visualize_nodes(self, node):\r\n # generating the .gv file\r\n gv_file = \"graph \\\"\\\" \\n{\\n\"\r\n global nodeCounter\r\n nodeCounter = 0\r\n def recurse(nodes,gv_file,parent_node_str = None):\r\n global nodeCounter\r\n nodes_list = []\r\n if(isinstance(nodes,list)):\r\n nodes_list.extend(nodes)\r\n else:\r\n nodes_list.append(nodes)\r\n for node in nodes_list:\r\n # node should add itself to the list\r\n current_node_str = \"n\" + str(nodeCounter)\r\n nodeCounter += 1\r\n ''' operation might contain non-node constants, hence need to make sure that they are converted to node'''\r\n if(type(node) in (int,float)):\r\n node = tinyTensor.Node.Node.variable(node) # creating a variable node\r\n '''creating the node labels'''\r\n if(isinstance(node,tinyTensor.Operation.Operation)):\r\n gv_file += current_node_str + \" [label=\\\"{} ({})\\\"] ;\\n\".format(node.operator,node.value)\r\n elif(node.isPlaceholder):\r\n gv_file += current_node_str + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n else:\r\n gv_file += current_node_str + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n # now creating connection line to parent(s) TODO: make it possible to have many parents, (nodes should have output nodes list)\r\n if(parent_node_str != None):\r\n gv_file += parent_node_str + \" -- \" + current_node_str + \"; \\n\"\r\n # applying the same to the children of this node\r\n if(len(node.inputNodes) > 0):\r\n gv_file = recurse(node.inputNodes,gv_file,current_node_str)\r\n return gv_file\r\n gv_file = recurse(node,gv_file)\r\n gv_file += \"}\\n\"\r\n with open(\"network.gv\",\"w+\") as file:\r\n file.writelines(gv_file)\r\n #render('dot','png','network.gv')\r\n print(gv_file)\r\n\r\n def visualize_layers(self,layer_list):\r\n neuron_dict = 
{}\r\n #generating dict of neurons\r\n gv_file = \"graph \\\"\\\" \\n{\\n\"\r\n #dealing with input nodes\r\n for node in layer_list[0].inputList:\r\n neuron_dict[node] = node.name\r\n gv_file += neuron_dict[node] + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n # creating dict for neurons\r\n for layer in layer_list:\r\n for neuron in layer.neuronList:\r\n neuron_dict[neuron] = \"{}\".format(neuron.name)\r\n gv_file += neuron_dict[neuron] + \" [label=\\\"{}({})\\\"] ;\\n\".format(neuron.name,neuron.value)\r\n # drawing links between neurons\r\n for layer in layer_list:\r\n for neuron in layer.neuronList:\r\n for input_neuron in neuron.inputNeurons:\r\n gv_file += neuron_dict[neuron] + \" -- \" + neuron_dict[input_neuron] + \"; \\n\"\r\n gv_file += \"}\\n\"\r\n with open(\"network.gv\",\"w+\") as file:\r\n file.writelines(gv_file)\r\n print(gv_file)\r\n\r\n\r\n\r\n\r\n", "step-ids": [ 5, 7, 8, 9, 10 ] }
[ 5, 7, 8, 9, 10 ]
from rest_framework import serializers
from .models import *


class MovieSerializer(serializers.Serializer):
    movie_name = serializers.ListField(child=serializers.CharField())


class FilmSerializer(serializers.ModelSerializer):
    class Meta:
        model = Movie
        fields = '__all__'
normal
{ "blob_id": "0509afdce0d28cc04f4452472881fe9c5e4fbcc4", "index": 7825, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass MovieSerializer(serializers.Serializer):\n <mask token>\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Movie\n fields = '__all__'\n", "step-3": "<mask token>\n\n\nclass MovieSerializer(serializers.Serializer):\n movie_name = serializers.ListField(child=serializers.CharField())\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Movie\n fields = '__all__'\n", "step-4": "from rest_framework import serializers\nfrom .models import *\n\n\nclass MovieSerializer(serializers.Serializer):\n movie_name = serializers.ListField(child=serializers.CharField())\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Movie\n fields = '__all__'\n", "step-5": null, "step-ids": [ 0, 2, 3, 4 ] }
[ 0, 2, 3, 4 ]
print("Praktikum Programa Komputer ")
print("Exercise 7.21")
print("")
print("===========================")
print("Nama : Ivanindra Rizky P")
print("NIM : I0320054")
print("")
print("===========================")
print("")
import random
a = [23, 45, 98, 36]
print('a = ', a)
print('random 1')
print('choice = ', random.choice(a))
print('random 2')
print('choice = ', random.choice(a))
print('random 3')
print('choice = ', random.choice(a))
normal
{ "blob_id": "6b731e329eec3947a17ef8ee8280f2ddf980c81c", "index": 7154, "step-1": "<mask token>\n", "step-2": "print('Praktikum Programa Komputer ')\nprint('Exercise 7.21')\nprint('')\nprint('===========================')\nprint('Nama : Ivanindra Rizky P')\nprint('NIM : I0320054')\nprint('')\nprint('===========================')\nprint('')\n<mask token>\nprint('a = ', a)\nprint('random 1')\nprint('choice = ', random.choice(a))\nprint('random 2')\nprint('choice = ', random.choice(a))\nprint('random 3')\nprint('choice = ', random.choice(a))\n", "step-3": "print('Praktikum Programa Komputer ')\nprint('Exercise 7.21')\nprint('')\nprint('===========================')\nprint('Nama : Ivanindra Rizky P')\nprint('NIM : I0320054')\nprint('')\nprint('===========================')\nprint('')\n<mask token>\na = [23, 45, 98, 36]\nprint('a = ', a)\nprint('random 1')\nprint('choice = ', random.choice(a))\nprint('random 2')\nprint('choice = ', random.choice(a))\nprint('random 3')\nprint('choice = ', random.choice(a))\n", "step-4": "print('Praktikum Programa Komputer ')\nprint('Exercise 7.21')\nprint('')\nprint('===========================')\nprint('Nama : Ivanindra Rizky P')\nprint('NIM : I0320054')\nprint('')\nprint('===========================')\nprint('')\nimport random\na = [23, 45, 98, 36]\nprint('a = ', a)\nprint('random 1')\nprint('choice = ', random.choice(a))\nprint('random 2')\nprint('choice = ', random.choice(a))\nprint('random 3')\nprint('choice = ', random.choice(a))\n", "step-5": "print(\"Praktikum Programa Komputer \")\r\nprint(\"Exercise 7.21\")\r\nprint(\"\")\r\nprint(\"===========================\")\r\nprint(\"Nama : Ivanindra Rizky P\")\r\nprint(\"NIM : I0320054\")\r\nprint(\"\")\r\nprint(\"===========================\")\r\nprint(\"\")\r\nimport random\r\na = [23, 45, 98, 36]\r\nprint('a = ', a)\r\nprint('random 1')\r\nprint('choice = ', random.choice(a))\r\nprint('random 2')\r\nprint('choice = ', random.choice(a))\r\nprint('random 3')\r\nprint('choice = ', random.choice(a))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys
from Node import Node
from PriorityQueue import PriorityQueue


def Print(text):
    if text is None or len(text) == 0:
        print('invalid text.')
        print('--------------------------------------------------------------')
        return

    text_set = set()
    for i in text:
        text_set.add(i)

    if len(text_set) == 1:
        print('invalid text.')
        print('--------------------------------------------------------------')
        return

    print("The size of the data is: {}\n".format(sys.getsizeof(text)))
    print("The content of the data is: {}\n".format(text))

    encoded_data, tree = huffman_encoding(text)

    print("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2))))
    print("The content of the encoded data is: {}\n".format(encoded_data))

    decoded_data = huffman_decoding(encoded_data, tree)

    print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data)))
    print("The content of the encoded data is: {}\n".format(decoded_data))
    print('--------------------------------------------------------------')


# this method will print huffman tree
def inorder(root):
    if root is not None:
        inorder(root.left)
        print('Data: ', root.data, 'Freq: ', root.frequency)
        if root.right is not None:
            print('Right: ', root.right.data)
        if root.left is not None:
            print('Left: ', root.left.data)
        inorder(root.right)

# end method inorder(root)


def generate_encoded_data(root):
    """
    :param root: is a root of huffman tree
    :return: dictionary contains all codes for each letter in the text.
    """
    return generate_encoded_data2(root, {}, '')


# helper method
def generate_encoded_data2(root, dic, code):
    if root is not None:
        # go left of the tree if root has a left child.
        if root.left is not None:
            s = code + '0'
            generate_encoded_data2(root.left, dic, s)

        # if root is a leaf node then add this letter as a key and the code as a value.
        if str(root.data).isalpha() or root.data == ' ':
            dic.update({root.data: code})

        # go left of the tree if root has a right child.
        if root.right is not None:
            s = code + '1'
            generate_encoded_data2(root.right, dic, s)

        return dic
    else:
        return None


def huffman_encoding(data):
    """
    :param data: is the text that will we encode.
    :return: encoded text as a binary and a root of huffman tree.
    """
    if len(data) == 0 or data is None:
        print('Please enter a valid data.')
        return '', None

    min_heap = PriorityQueue()
    count_dic = {}
    # count frequency of each letter and add it in count_dic as a value of the letter.
    for i in range(len(data)):
        if data[i] in count_dic:
            count_dic[data[i]] += 1
        else:
            count_dic[data[i]] = 1

    # add all element in count_dic to min_heap.
    for i, j in count_dic.items():
        new_node = Node(i, j)
        min_heap.push(new_node, new_node.frequency)

    count: int = 1

    # create huffman tree phase 1.
    while min_heap.size() >= 2:
        item_1 = min_heap.pop()
        item_2 = min_heap.pop()
        sum_frequency = item_1.frequency + item_2.frequency
        node = Node(count, sum_frequency, item_1, item_2)
        min_heap.push(node, node.frequency)
        count += 1

    # the root of huffman tree.
    root = min_heap.pop()
    # generate the Encoded Data.
    codes_ = generate_encoded_data(root)

    # create string represent encoded data.
    encoded = ''
    for char in data:
        if codes_.get(char) is not None:
            encoded += codes_.get(char)

    return encoded, root


def huffman_decoding(data, root):
    """
    :param data: is the encoded text as a binary.
    :param root: is the root of huffman tree.
    :return: the decoded data.
    """
    if len(data) == 0:
        print('Please enter a valid data.')
        return '', None

    decoded = ''
    i = 0
    curr = root
    while i < len(data):
        """
        If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if
        the current bit is 1.
        """
        if data[i] == '0':
            curr = curr.left
        else:
            curr = curr.right
        # go to the next cell of the encoded data.
        i += 1

        # if curr is leaf node then this node contain a letter.
        if curr.is_leaf():
            # add this letter to decoded data.
            decoded += curr.data
            # return and start from the root to find the next letter.
            curr = root

    return decoded


# Test case 1 -----------------------------------
a_great_sentence = 'The bird is the word'
Print(a_great_sentence)

# Test case 2 -----------------------------------
t1 = ''
Print(t1)  # will print 'invalid text'

# Test case 3 -----------------------------------
t2 = 'AAAAAB'
Print(t2)

# Test case 4 -----------------------------------
t3 = 'AAAAA'
Print(t3)  # will print 'invalid text'
normal
{ "blob_id": "bcdd36b534fd3551de9cb40efc11581f4d95a002", "index": 9717, "step-1": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\n<mask token>\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n 
print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\n<mask token>\nPrint(a_great_sentence)\n<mask token>\nPrint(t1)\n<mask token>\nPrint(t2)\n<mask token>\nPrint(t3)\n", "step-3": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: 
{}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\na_great_sentence = 'The bird is the word'\nPrint(a_great_sentence)\nt1 = ''\nPrint(t1)\nt2 = 'AAAAAB'\nPrint(t2)\nt3 = 'AAAAA'\nPrint(t3)\n", "step-4": "import sys\nfrom Node import Node\nfrom PriorityQueue import PriorityQueue\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of 
the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\na_great_sentence = 'The bird is the word'\nPrint(a_great_sentence)\nt1 = ''\nPrint(t1)\nt2 = 'AAAAAB'\nPrint(t2)\nt3 = 'AAAAA'\nPrint(t3)\n", "step-5": "import sys\r\nfrom Node import Node\r\nfrom PriorityQueue import PriorityQueue\r\n\r\n\r\ndef Print(text):\r\n if text is None or len(text) == 0:\r\n print('invalid text.')\r\n print('--------------------------------------------------------------')\r\n return\r\n\r\n text_set = set()\r\n for i in text:\r\n text_set.add(i)\r\n\r\n if len(text_set) == 1:\r\n print('invalid text.')\r\n print('--------------------------------------------------------------')\r\n return\r\n\r\n print(\"The size of the data is: {}\\n\".format(sys.getsizeof(text)))\r\n print(\"The content of the data is: {}\\n\".format(text))\r\n\r\n encoded_data, tree = huffman_encoding(text)\r\n\r\n print(\"The size of the encoded data is: 
{}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\r\n print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\r\n\r\n decoded_data = huffman_decoding(encoded_data, tree)\r\n\r\n print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\r\n print(\"The content of the encoded data is: {}\\n\".format(decoded_data))\r\n print('--------------------------------------------------------------')\r\n\r\n\r\n# this method will print huffman tree\r\ndef inorder(root):\r\n if root is not None:\r\n inorder(root.left)\r\n print('Data: ', root.data, 'Freq: ', root.frequency)\r\n if root.right is not None:\r\n print('Right: ', root.right.data)\r\n if root.left is not None:\r\n print('Left: ', root.left.data)\r\n inorder(root.right)\r\n\r\n# end method inorder(root)\r\n\r\n\r\ndef generate_encoded_data(root):\r\n \"\"\"\r\n :param root: is a root of huffman tree\r\n :return: dictionary contains all codes for each letter in the text.\r\n \"\"\"\r\n return generate_encoded_data2(root, {}, '')\r\n\r\n\r\n# helper method\r\ndef generate_encoded_data2(root, dic, code):\r\n if root is not None:\r\n # go left of the tree if root has a left child.\r\n if root.left is not None:\r\n s = code + '0'\r\n generate_encoded_data2(root.left, dic, s)\r\n\r\n # if root is a leaf node then add this letter as a key and the code as a value.\r\n if str(root.data).isalpha() or root.data == ' ':\r\n dic.update({root.data: code})\r\n\r\n # go left of the tree if root has a right child.\r\n if root.right is not None:\r\n s = code + '1'\r\n generate_encoded_data2(root.right, dic, s)\r\n\r\n return dic\r\n else:\r\n return None\r\n\r\n\r\ndef huffman_encoding(data):\r\n \"\"\"\r\n :param data: is the text that will we encode.\r\n :return: encoded text as a binary and a root of huffman tree.\r\n \"\"\"\r\n if len(data) == 0 or data is None:\r\n print('Please enter a valid data.')\r\n return '', None\r\n\r\n min_heap = PriorityQueue()\r\n count_dic = {}\r\n # count frequency of each letter and add it in count_dic as a value of the letter.\r\n for i in range(len(data)):\r\n if data[i] in count_dic:\r\n count_dic[data[i]] += 1\r\n else:\r\n count_dic[data[i]] = 1\r\n\r\n # add all element in count_dic to min_heap.\r\n for i, j in count_dic.items():\r\n new_node = Node(i, j)\r\n min_heap.push(new_node, new_node.frequency)\r\n\r\n count: int = 1\r\n\r\n # create huffman tree phase 1.\r\n while min_heap.size() >= 2:\r\n item_1 = min_heap.pop()\r\n item_2 = min_heap.pop()\r\n sum_frequency = item_1.frequency + item_2.frequency\r\n node = Node(count, sum_frequency, item_1, item_2)\r\n min_heap.push(node, node.frequency)\r\n count += 1\r\n\r\n # the root of huffman tree.\r\n root = min_heap.pop()\r\n # generate the Encoded Data.\r\n codes_ = generate_encoded_data(root)\r\n\r\n # create string represent encoded data.\r\n encoded = ''\r\n for char in data:\r\n if codes_.get(char) is not None:\r\n encoded += codes_.get(char)\r\n\r\n return encoded, root\r\n\r\n\r\ndef huffman_decoding(data, root):\r\n \"\"\"\r\n :param data: is the encoded text as a binary.\r\n :param root: is the root of huffman tree.\r\n :return: the decoded data.\r\n \"\"\"\r\n if len(data) == 0:\r\n print('Please enter a valid data.')\r\n return '', None\r\n\r\n decoded = ''\r\n i = 0\r\n curr = root\r\n while i < len(data):\r\n \"\"\"\r\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\r\n the current bit is 1.\r\n \"\"\"\r\n if data[i] == '0':\r\n curr = 
curr.left\r\n else:\r\n curr = curr.right\r\n # go to the next cell of the encoded data.\r\n i += 1\r\n\r\n # if curr is leaf node then this node contain a letter.\r\n if curr.is_leaf():\r\n # add this letter to decoded data.\r\n decoded += curr.data\r\n # return and start from the root to find the next letter.\r\n curr = root\r\n\r\n return decoded\r\n\r\n\r\n# Test case 1 -----------------------------------\r\na_great_sentence = 'The bird is the word'\r\nPrint(a_great_sentence)\r\n\r\n# Test case 2 -----------------------------------\r\nt1 = ''\r\nPrint(t1) # will print 'invalid text'\r\n\r\n# Test case 3 -----------------------------------\r\nt2 = 'AAAAAB'\r\nPrint(t2)\r\n\r\n# Test case 4 -----------------------------------\r\nt3 = 'AAAAA'\r\nPrint(t3) # will print 'invalid text'\r\n", "step-ids": [ 5, 7, 8, 9, 10 ] }
[ 5, 7, 8, 9, 10 ]
with open('rosalind_ba3d.txt','r') as f:
    kmer_length = int(f.readline().strip())
    seq = f.readline().strip()

dict = {}
for offset in range(len(seq)-kmer_length+1):
    prefix = seq[offset:offset+kmer_length-1]
    suffix = seq[offset+1:offset+kmer_length]
    if prefix in dict:
        dict[prefix].append(suffix)
    else:
        dict[prefix] = [suffix]

for key in sorted(dict):
    print(key + " -> " + ','.join(sorted(dict[key])))
normal
{ "blob_id": "050f060bb9d3d46f8b87c9802356bd0da8f926f8", "index": 6244, "step-1": "<mask token>\n", "step-2": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\n<mask token>\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n", "step-3": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\ndict = {}\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n", "step-4": "with open('rosalind_ba3d.txt','r') as f:\r\n\tkmer_length = int(f.readline().strip())\r\n\tseq = f.readline().strip()\r\n\r\ndict = {}\r\nfor offset in range(len(seq)-kmer_length+1):\r\n\tprefix = seq[offset:offset+kmer_length-1]\r\n\tsuffix = seq[offset+1:offset+kmer_length]\r\n\tif prefix in dict:\r\n\t\tdict[prefix].append(suffix)\r\n\telse:\r\n\t\tdict[prefix] = [suffix]\r\n\r\nfor key in sorted(dict):\r\n\tprint(key + \" -> \" + ','.join(sorted(dict[key])))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import data
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
import pandas as pd
import csv

from matplotlib2tikz import save as tikz_save

import trial_sets


def print_stats(trial_id, dl):
    wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)
    print("Length of Dataframe: " + str(data.get_df_length(wrist_device)))

    wrist_oxygen = wrist_device.values.flatten()
    true_oxygen = true_device.values.flatten()

    sample_count = wrist_oxygen.shape[0]
    wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))

    print("Samples Collected: " + str(sample_count))

    algo_percent = (wrist_reliable_count / sample_count) * 100
    print("Algorithm marked {} samples, or {:.1f}%, as reliable".format(wrist_reliable_count, algo_percent))

    true_reliable_count = 0
    for o1, o2 in zip(wrist_oxygen, true_oxygen):
        difference = np.abs(np.subtract(o1, o2))
        if difference <= dl.threshold:
            true_reliable_count += 1

    actual_precent = (true_reliable_count / sample_count) * 100
    print("{}, or {:.1f}%, of labels were within {} of wrist sensor".format(true_reliable_count, actual_precent, dl.threshold))
    print("Positive Labels: " + str(true_reliable_count))


def visualize_classifier_results(training_ids, test_id, dl, show_classifier=True):

    if show_classifier:
        X_train, y_train = dl.load(training_ids, iid=True)
        X_test, y_test = dl.load([test_id], iid=False)

        clf = xgb.XGBClassifier(
            learning_rate=0.1,
            n_estimators=101,
            max_depth=3,
            min_child_weight=3,
            gamma=0.3,
            subsample=0.9,
            colsample_bytree=0.6,
            scale_pos_weight=1,
            reg_alpha=0.01,
            objective='binary:logistic',
            nthread=data.N_JOBS,
            random_state=42)

        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
    else:
        y_pred = None

    wrist_oxygen, wrist_oxygen_clean, true_oxygen = dl.load_oxygen(test_id, y_pred=y_pred, iid=False)

    wrist_oxygen = wrist_oxygen[::5]
    wrist_oxygen_clean = wrist_oxygen_clean[::5]
    true_oxygen = true_oxygen[::5]

    if show_classifier:
        graph_df = pd.concat([wrist_oxygen, true_oxygen, wrist_oxygen_clean], axis=1, sort=True)
    else:
        graph_df = pd.concat([wrist_oxygen, true_oxygen], axis=1, sort=True)

    assert(wrist_oxygen.shape == true_oxygen.shape)
    assert(graph_df.shape[0] == wrist_oxygen.shape[0])

    # plt.figure(figsize=(4 * 1.2, 3 * 1.2))

    graph_df.plot.line(figsize=(4 * 1.2, 2 * 1.2))
    plt.xlabel("Time (Milliseconds)")
    plt.ylabel("SpO2 (%)")
    plt.ylim()
    plt.legend(loc='lower left')
    if show_classifier:
        plt.savefig(data.GRAPH_CACHE + 'classifier-{}-{}.pdf'.format(test_id, str(dl)))
        tikz_save(data.LTX_CACHE + 'classifier-{}-{}.tex'.format(test_id, str(dl)))
    else:
        plt.savefig(data.GRAPH_CACHE + 'algos-{}-{}.pdf'.format(test_id, str(dl)))


def print_all_stats():
    dl = data.DataLoader(window_size=100, threshold=1.0, algo_name='enhanced', features='comprehensive')
    for trial_id in trial_sets.top_ids:
        print("\nStats for trial: {}".format(trial_id))
        print_stats(trial_id, dl)


def visualize_all_classifier_results():

    trial_ids = trial_sets.top_ids

    dl = data.DataLoader(window_size=100, threshold=2.0, algo_name='enhanced', features='comprehensive')

    for trial_id in trial_ids:
        print("Trial {}".format(trial_id))
        training_ids = trial_ids.copy()
        training_ids.remove(trial_id)

        visualize_classifier_results(training_ids, trial_id, dl)


def create_error_cdf():
    THRESHOLD = 2.0
    dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='enhanced', features='comprehensive')
    dl_maxim = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='maxim', features='comprehensive')

    maxim_errors = []
    enhanced_errors = []
    wristo_errors = []

    for trial_id in trial_sets.top_ids:

        training_ids = trial_sets.top_ids.copy()
        training_ids.remove(trial_id)
        X_train, y_train = dl_enhanced.load(training_ids, iid=True)
        X_test, y_test = dl_enhanced.load([trial_id], iid=False)

        clf = xgb.XGBClassifier(
            learning_rate=0.1,
            n_estimators=101,
            max_depth=3,
            min_child_weight=3,
            gamma=0.3,
            subsample=0.9,
            colsample_bytree=0.6,
            scale_pos_weight=1,
            reg_alpha=0.01,
            objective='binary:logistic',
            nthread=data.N_JOBS,
            random_state=42)

        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)

        wrist_enhanced, wrist_clean, fingertip_enhanced = dl_enhanced.load_oxygen(trial_id, y_pred=y_pred, iid=False)
        wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id, iid=False)

        wrist_maxim = wrist_maxim.values.flatten()
        wrist_enhanced = wrist_enhanced.values.flatten()
        fingertip_maxim = fingertip_maxim.values.flatten()
        fingertip_enhanced = fingertip_enhanced.values.flatten()
        wrist_clean = wrist_clean.values.flatten()

        for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced, fingertip_maxim, fingertip_enhanced, wrist_clean):
            maxim_errors.append(np.abs(np.subtract(oM, oMF)))
            enhanced_errors.append(np.abs(np.subtract(oE, oMF)))
            wristo_errors.append(np.abs(np.subtract(oC, oMF)))

    maxim_errors = np.array(maxim_errors)
    enhanced_errors = np.array(enhanced_errors)
    wristo_errors = np.array(wristo_errors)

    maxim_errors = maxim_errors[~np.isnan(maxim_errors)]
    enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]
    wristo_errors = wristo_errors[~np.isnan(wristo_errors)]

    rmses = [maxim_errors, enhanced_errors, wristo_errors]

    plt.figure(figsize=(4 * 1.2, 2 * 1.2))
    for e in rmses:
        sorted_data = np.sort(e)
        yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)
        plt.plot(sorted_data, yvals)

    plt.legend(['Baseline', 'Enhanced', 'WristO2'])
    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 10.0)
    plt.xlabel('Absolute Error')
    plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')
    tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')


def create_fingertip_cdf():
    THRESHOLD = 2.0
    dl = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='enhanced', features='comprehensive')

    fingertip_error = []

    csv_file = open(data.GRAPH_CACHE + 'csv-fingertip.csv', 'w')
    csvwriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writerow(['reflective', 'transitive'])

    for trial_id in trial_sets.top_ids:
        wrist_oxygen, fingertip_oxygen, transitive_oxygen = dl.load_all_oxygen(trial_id)

        for oF, oT in zip(fingertip_oxygen, transitive_oxygen):
            csvwriter.writerow([oF, oT])
            fingertip_error.append(np.square(np.subtract(oF, oT)))

    fingertip_error = np.array(fingertip_error)
    fingertip_error = fingertip_error[~np.isnan(fingertip_error)]

    plt.figure()
    sorted_data = np.sort(fingertip_error)
    yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)
    plt.plot(sorted_data, yvals)

    # plt.legend(['Baseline', 'Enhanced'])
    plt.ylim(0.0, 1.0)
    plt.xlabel('RMSE')
    plt.savefig(data.GRAPH_CACHE + 'cdf-fingertip.pdf')

    csv_file.close()


if __name__ == '__main__':
    # print_all_stats()
    visualize_all_classifier_results()
    create_error_cdf()
    # create_fingertip_cdf()
normal
{ "blob_id": "836e2fd6eca7453ab7a3da2ecb21705552b5f627", "index": 5157, "step-1": "<mask token>\n\n\ndef print_stats(trial_id, dl):\n wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)\n print('Length of Dataframe: ' + str(data.get_df_length(wrist_device)))\n wrist_oxygen = wrist_device.values.flatten()\n true_oxygen = true_device.values.flatten()\n sample_count = wrist_oxygen.shape[0]\n wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))\n print('Samples Collected: ' + str(sample_count))\n algo_percent = wrist_reliable_count / sample_count * 100\n print('Algorithm marked {} samples, or {:.1f}%, as reliable'.format(\n wrist_reliable_count, algo_percent))\n true_reliable_count = 0\n for o1, o2 in zip(wrist_oxygen, true_oxygen):\n difference = np.abs(np.subtract(o1, o2))\n if difference <= dl.threshold:\n true_reliable_count += 1\n actual_precent = true_reliable_count / sample_count * 100\n print('{}, or {:.1f}%, of labels were within {} of wrist sensor'.format\n (true_reliable_count, actual_precent, dl.threshold))\n print('Positive Labels: ' + str(true_reliable_count))\n\n\n<mask token>\n\n\ndef print_all_stats():\n dl = data.DataLoader(window_size=100, threshold=1.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_sets.top_ids:\n print('\\nStats for trial: {}'.format(trial_id))\n print_stats(trial_id, dl)\n\n\ndef visualize_all_classifier_results():\n trial_ids = trial_sets.top_ids\n dl = data.DataLoader(window_size=100, threshold=2.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_ids:\n print('Trial {}'.format(trial_id))\n training_ids = trial_ids.copy()\n training_ids.remove(trial_id)\n visualize_classifier_results(training_ids, trial_id, dl)\n\n\ndef create_error_cdf():\n THRESHOLD = 2.0\n dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='enhanced', features='comprehensive')\n dl_maxim = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='maxim', features='comprehensive')\n maxim_errors = []\n enhanced_errors = []\n wristo_errors = []\n for trial_id in trial_sets.top_ids:\n training_ids = trial_sets.top_ids.copy()\n training_ids.remove(trial_id)\n X_train, y_train = dl_enhanced.load(training_ids, iid=True)\n X_test, y_test = dl_enhanced.load([trial_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n wrist_enhanced, wrist_clean, fingertip_enhanced = (dl_enhanced.\n load_oxygen(trial_id, y_pred=y_pred, iid=False))\n wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id,\n iid=False)\n wrist_maxim = wrist_maxim.values.flatten()\n wrist_enhanced = wrist_enhanced.values.flatten()\n fingertip_maxim = fingertip_maxim.values.flatten()\n fingertip_enhanced = fingertip_enhanced.values.flatten()\n wrist_clean = wrist_clean.values.flatten()\n for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced,\n fingertip_maxim, fingertip_enhanced, wrist_clean):\n maxim_errors.append(np.abs(np.subtract(oM, oMF)))\n enhanced_errors.append(np.abs(np.subtract(oE, oMF)))\n wristo_errors.append(np.abs(np.subtract(oC, oMF)))\n maxim_errors = np.array(maxim_errors)\n enhanced_errors = np.array(enhanced_errors)\n wristo_errors = np.array(wristo_errors)\n maxim_errors = 
maxim_errors[~np.isnan(maxim_errors)]\n enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]\n wristo_errors = wristo_errors[~np.isnan(wristo_errors)]\n rmses = [maxim_errors, enhanced_errors, wristo_errors]\n plt.figure(figsize=(4 * 1.2, 2 * 1.2))\n for e in rmses:\n sorted_data = np.sort(e)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.legend(['Baseline', 'Enhanced', 'WristO2'])\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 10.0)\n plt.xlabel('Absolute Error')\n plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')\n tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef print_stats(trial_id, dl):\n wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)\n print('Length of Dataframe: ' + str(data.get_df_length(wrist_device)))\n wrist_oxygen = wrist_device.values.flatten()\n true_oxygen = true_device.values.flatten()\n sample_count = wrist_oxygen.shape[0]\n wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))\n print('Samples Collected: ' + str(sample_count))\n algo_percent = wrist_reliable_count / sample_count * 100\n print('Algorithm marked {} samples, or {:.1f}%, as reliable'.format(\n wrist_reliable_count, algo_percent))\n true_reliable_count = 0\n for o1, o2 in zip(wrist_oxygen, true_oxygen):\n difference = np.abs(np.subtract(o1, o2))\n if difference <= dl.threshold:\n true_reliable_count += 1\n actual_precent = true_reliable_count / sample_count * 100\n print('{}, or {:.1f}%, of labels were within {} of wrist sensor'.format\n (true_reliable_count, actual_precent, dl.threshold))\n print('Positive Labels: ' + str(true_reliable_count))\n\n\ndef visualize_classifier_results(training_ids, test_id, dl, show_classifier\n =True):\n if show_classifier:\n X_train, y_train = dl.load(training_ids, iid=True)\n X_test, y_test = dl.load([test_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n else:\n y_pred = None\n wrist_oxygen, wrist_oxygen_clean, true_oxygen = dl.load_oxygen(test_id,\n y_pred=y_pred, iid=False)\n wrist_oxygen = wrist_oxygen[::5]\n wrist_oxygen_clean = wrist_oxygen_clean[::5]\n true_oxygen = true_oxygen[::5]\n if show_classifier:\n graph_df = pd.concat([wrist_oxygen, true_oxygen, wrist_oxygen_clean\n ], axis=1, sort=True)\n else:\n graph_df = pd.concat([wrist_oxygen, true_oxygen], axis=1, sort=True)\n assert wrist_oxygen.shape == true_oxygen.shape\n assert graph_df.shape[0] == wrist_oxygen.shape[0]\n graph_df.plot.line(figsize=(4 * 1.2, 2 * 1.2))\n plt.xlabel('Time (Milliseconds)')\n plt.ylabel('SpO2 (%)')\n plt.ylim()\n plt.legend(loc='lower left')\n if show_classifier:\n plt.savefig(data.GRAPH_CACHE + 'classifier-{}-{}.pdf'.format(\n test_id, str(dl)))\n tikz_save(data.LTX_CACHE + 'classifier-{}-{}.tex'.format(test_id,\n str(dl)))\n else:\n plt.savefig(data.GRAPH_CACHE + 'algos-{}-{}.pdf'.format(test_id,\n str(dl)))\n\n\ndef print_all_stats():\n dl = data.DataLoader(window_size=100, threshold=1.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_sets.top_ids:\n print('\\nStats for trial: {}'.format(trial_id))\n print_stats(trial_id, dl)\n\n\ndef visualize_all_classifier_results():\n trial_ids = trial_sets.top_ids\n dl = 
data.DataLoader(window_size=100, threshold=2.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_ids:\n print('Trial {}'.format(trial_id))\n training_ids = trial_ids.copy()\n training_ids.remove(trial_id)\n visualize_classifier_results(training_ids, trial_id, dl)\n\n\ndef create_error_cdf():\n THRESHOLD = 2.0\n dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='enhanced', features='comprehensive')\n dl_maxim = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='maxim', features='comprehensive')\n maxim_errors = []\n enhanced_errors = []\n wristo_errors = []\n for trial_id in trial_sets.top_ids:\n training_ids = trial_sets.top_ids.copy()\n training_ids.remove(trial_id)\n X_train, y_train = dl_enhanced.load(training_ids, iid=True)\n X_test, y_test = dl_enhanced.load([trial_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n wrist_enhanced, wrist_clean, fingertip_enhanced = (dl_enhanced.\n load_oxygen(trial_id, y_pred=y_pred, iid=False))\n wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id,\n iid=False)\n wrist_maxim = wrist_maxim.values.flatten()\n wrist_enhanced = wrist_enhanced.values.flatten()\n fingertip_maxim = fingertip_maxim.values.flatten()\n fingertip_enhanced = fingertip_enhanced.values.flatten()\n wrist_clean = wrist_clean.values.flatten()\n for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced,\n fingertip_maxim, fingertip_enhanced, wrist_clean):\n maxim_errors.append(np.abs(np.subtract(oM, oMF)))\n enhanced_errors.append(np.abs(np.subtract(oE, oMF)))\n wristo_errors.append(np.abs(np.subtract(oC, oMF)))\n maxim_errors = np.array(maxim_errors)\n enhanced_errors = np.array(enhanced_errors)\n wristo_errors = np.array(wristo_errors)\n maxim_errors = maxim_errors[~np.isnan(maxim_errors)]\n enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]\n wristo_errors = wristo_errors[~np.isnan(wristo_errors)]\n rmses = [maxim_errors, enhanced_errors, wristo_errors]\n plt.figure(figsize=(4 * 1.2, 2 * 1.2))\n for e in rmses:\n sorted_data = np.sort(e)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.legend(['Baseline', 'Enhanced', 'WristO2'])\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 10.0)\n plt.xlabel('Absolute Error')\n plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')\n tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')\n\n\ndef create_fingertip_cdf():\n THRESHOLD = 2.0\n dl = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name=\n 'enhanced', features='comprehensive')\n fingertip_error = []\n csv_file = open(data.GRAPH_CACHE + 'csv-fingertip.csv', 'w')\n csvwriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL)\n csvwriter.writerow(['reflective', 'transitive'])\n for trial_id in trial_sets.top_ids:\n wrist_oxygen, fingertip_oxygen, transitive_oxygen = dl.load_all_oxygen(\n trial_id)\n for oF, oT in zip(fingertip_oxygen, transitive_oxygen):\n csvwriter.writerow([oF, oT])\n fingertip_error.append(np.square(np.subtract(oF, oT)))\n fingertip_error = np.array(fingertip_error)\n fingertip_error = fingertip_error[~np.isnan(fingertip_error)]\n plt.figure()\n sorted_data = np.sort(fingertip_error)\n yvals = np.arange(len(sorted_data)) / 
float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.ylim(0.0, 1.0)\n plt.xlabel('RMSE')\n plt.savefig(data.GRAPH_CACHE + 'cdf-fingertip.pdf')\n csv_file.close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef print_stats(trial_id, dl):\n wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)\n print('Length of Dataframe: ' + str(data.get_df_length(wrist_device)))\n wrist_oxygen = wrist_device.values.flatten()\n true_oxygen = true_device.values.flatten()\n sample_count = wrist_oxygen.shape[0]\n wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))\n print('Samples Collected: ' + str(sample_count))\n algo_percent = wrist_reliable_count / sample_count * 100\n print('Algorithm marked {} samples, or {:.1f}%, as reliable'.format(\n wrist_reliable_count, algo_percent))\n true_reliable_count = 0\n for o1, o2 in zip(wrist_oxygen, true_oxygen):\n difference = np.abs(np.subtract(o1, o2))\n if difference <= dl.threshold:\n true_reliable_count += 1\n actual_precent = true_reliable_count / sample_count * 100\n print('{}, or {:.1f}%, of labels were within {} of wrist sensor'.format\n (true_reliable_count, actual_precent, dl.threshold))\n print('Positive Labels: ' + str(true_reliable_count))\n\n\ndef visualize_classifier_results(training_ids, test_id, dl, show_classifier\n =True):\n if show_classifier:\n X_train, y_train = dl.load(training_ids, iid=True)\n X_test, y_test = dl.load([test_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n else:\n y_pred = None\n wrist_oxygen, wrist_oxygen_clean, true_oxygen = dl.load_oxygen(test_id,\n y_pred=y_pred, iid=False)\n wrist_oxygen = wrist_oxygen[::5]\n wrist_oxygen_clean = wrist_oxygen_clean[::5]\n true_oxygen = true_oxygen[::5]\n if show_classifier:\n graph_df = pd.concat([wrist_oxygen, true_oxygen, wrist_oxygen_clean\n ], axis=1, sort=True)\n else:\n graph_df = pd.concat([wrist_oxygen, true_oxygen], axis=1, sort=True)\n assert wrist_oxygen.shape == true_oxygen.shape\n assert graph_df.shape[0] == wrist_oxygen.shape[0]\n graph_df.plot.line(figsize=(4 * 1.2, 2 * 1.2))\n plt.xlabel('Time (Milliseconds)')\n plt.ylabel('SpO2 (%)')\n plt.ylim()\n plt.legend(loc='lower left')\n if show_classifier:\n plt.savefig(data.GRAPH_CACHE + 'classifier-{}-{}.pdf'.format(\n test_id, str(dl)))\n tikz_save(data.LTX_CACHE + 'classifier-{}-{}.tex'.format(test_id,\n str(dl)))\n else:\n plt.savefig(data.GRAPH_CACHE + 'algos-{}-{}.pdf'.format(test_id,\n str(dl)))\n\n\ndef print_all_stats():\n dl = data.DataLoader(window_size=100, threshold=1.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_sets.top_ids:\n print('\\nStats for trial: {}'.format(trial_id))\n print_stats(trial_id, dl)\n\n\ndef visualize_all_classifier_results():\n trial_ids = trial_sets.top_ids\n dl = data.DataLoader(window_size=100, threshold=2.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_ids:\n print('Trial {}'.format(trial_id))\n training_ids = trial_ids.copy()\n training_ids.remove(trial_id)\n visualize_classifier_results(training_ids, trial_id, dl)\n\n\ndef create_error_cdf():\n THRESHOLD = 2.0\n dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='enhanced', features='comprehensive')\n dl_maxim = 
data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='maxim', features='comprehensive')\n maxim_errors = []\n enhanced_errors = []\n wristo_errors = []\n for trial_id in trial_sets.top_ids:\n training_ids = trial_sets.top_ids.copy()\n training_ids.remove(trial_id)\n X_train, y_train = dl_enhanced.load(training_ids, iid=True)\n X_test, y_test = dl_enhanced.load([trial_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n wrist_enhanced, wrist_clean, fingertip_enhanced = (dl_enhanced.\n load_oxygen(trial_id, y_pred=y_pred, iid=False))\n wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id,\n iid=False)\n wrist_maxim = wrist_maxim.values.flatten()\n wrist_enhanced = wrist_enhanced.values.flatten()\n fingertip_maxim = fingertip_maxim.values.flatten()\n fingertip_enhanced = fingertip_enhanced.values.flatten()\n wrist_clean = wrist_clean.values.flatten()\n for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced,\n fingertip_maxim, fingertip_enhanced, wrist_clean):\n maxim_errors.append(np.abs(np.subtract(oM, oMF)))\n enhanced_errors.append(np.abs(np.subtract(oE, oMF)))\n wristo_errors.append(np.abs(np.subtract(oC, oMF)))\n maxim_errors = np.array(maxim_errors)\n enhanced_errors = np.array(enhanced_errors)\n wristo_errors = np.array(wristo_errors)\n maxim_errors = maxim_errors[~np.isnan(maxim_errors)]\n enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]\n wristo_errors = wristo_errors[~np.isnan(wristo_errors)]\n rmses = [maxim_errors, enhanced_errors, wristo_errors]\n plt.figure(figsize=(4 * 1.2, 2 * 1.2))\n for e in rmses:\n sorted_data = np.sort(e)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.legend(['Baseline', 'Enhanced', 'WristO2'])\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 10.0)\n plt.xlabel('Absolute Error')\n plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')\n tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')\n\n\ndef create_fingertip_cdf():\n THRESHOLD = 2.0\n dl = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name=\n 'enhanced', features='comprehensive')\n fingertip_error = []\n csv_file = open(data.GRAPH_CACHE + 'csv-fingertip.csv', 'w')\n csvwriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL)\n csvwriter.writerow(['reflective', 'transitive'])\n for trial_id in trial_sets.top_ids:\n wrist_oxygen, fingertip_oxygen, transitive_oxygen = dl.load_all_oxygen(\n trial_id)\n for oF, oT in zip(fingertip_oxygen, transitive_oxygen):\n csvwriter.writerow([oF, oT])\n fingertip_error.append(np.square(np.subtract(oF, oT)))\n fingertip_error = np.array(fingertip_error)\n fingertip_error = fingertip_error[~np.isnan(fingertip_error)]\n plt.figure()\n sorted_data = np.sort(fingertip_error)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.ylim(0.0, 1.0)\n plt.xlabel('RMSE')\n plt.savefig(data.GRAPH_CACHE + 'cdf-fingertip.pdf')\n csv_file.close()\n\n\nif __name__ == '__main__':\n visualize_all_classifier_results()\n create_error_cdf()\n", "step-4": "import data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport pandas as pd\nimport csv\nfrom matplotlib2tikz import save as tikz_save\nimport 
trial_sets\n\n\ndef print_stats(trial_id, dl):\n wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)\n print('Length of Dataframe: ' + str(data.get_df_length(wrist_device)))\n wrist_oxygen = wrist_device.values.flatten()\n true_oxygen = true_device.values.flatten()\n sample_count = wrist_oxygen.shape[0]\n wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))\n print('Samples Collected: ' + str(sample_count))\n algo_percent = wrist_reliable_count / sample_count * 100\n print('Algorithm marked {} samples, or {:.1f}%, as reliable'.format(\n wrist_reliable_count, algo_percent))\n true_reliable_count = 0\n for o1, o2 in zip(wrist_oxygen, true_oxygen):\n difference = np.abs(np.subtract(o1, o2))\n if difference <= dl.threshold:\n true_reliable_count += 1\n actual_precent = true_reliable_count / sample_count * 100\n print('{}, or {:.1f}%, of labels were within {} of wrist sensor'.format\n (true_reliable_count, actual_precent, dl.threshold))\n print('Positive Labels: ' + str(true_reliable_count))\n\n\ndef visualize_classifier_results(training_ids, test_id, dl, show_classifier\n =True):\n if show_classifier:\n X_train, y_train = dl.load(training_ids, iid=True)\n X_test, y_test = dl.load([test_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n else:\n y_pred = None\n wrist_oxygen, wrist_oxygen_clean, true_oxygen = dl.load_oxygen(test_id,\n y_pred=y_pred, iid=False)\n wrist_oxygen = wrist_oxygen[::5]\n wrist_oxygen_clean = wrist_oxygen_clean[::5]\n true_oxygen = true_oxygen[::5]\n if show_classifier:\n graph_df = pd.concat([wrist_oxygen, true_oxygen, wrist_oxygen_clean\n ], axis=1, sort=True)\n else:\n graph_df = pd.concat([wrist_oxygen, true_oxygen], axis=1, sort=True)\n assert wrist_oxygen.shape == true_oxygen.shape\n assert graph_df.shape[0] == wrist_oxygen.shape[0]\n graph_df.plot.line(figsize=(4 * 1.2, 2 * 1.2))\n plt.xlabel('Time (Milliseconds)')\n plt.ylabel('SpO2 (%)')\n plt.ylim()\n plt.legend(loc='lower left')\n if show_classifier:\n plt.savefig(data.GRAPH_CACHE + 'classifier-{}-{}.pdf'.format(\n test_id, str(dl)))\n tikz_save(data.LTX_CACHE + 'classifier-{}-{}.tex'.format(test_id,\n str(dl)))\n else:\n plt.savefig(data.GRAPH_CACHE + 'algos-{}-{}.pdf'.format(test_id,\n str(dl)))\n\n\ndef print_all_stats():\n dl = data.DataLoader(window_size=100, threshold=1.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_sets.top_ids:\n print('\\nStats for trial: {}'.format(trial_id))\n print_stats(trial_id, dl)\n\n\ndef visualize_all_classifier_results():\n trial_ids = trial_sets.top_ids\n dl = data.DataLoader(window_size=100, threshold=2.0, algo_name=\n 'enhanced', features='comprehensive')\n for trial_id in trial_ids:\n print('Trial {}'.format(trial_id))\n training_ids = trial_ids.copy()\n training_ids.remove(trial_id)\n visualize_classifier_results(training_ids, trial_id, dl)\n\n\ndef create_error_cdf():\n THRESHOLD = 2.0\n dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='enhanced', features='comprehensive')\n dl_maxim = data.DataLoader(window_size=100, threshold=THRESHOLD,\n algo_name='maxim', features='comprehensive')\n maxim_errors = []\n enhanced_errors = []\n wristo_errors = []\n for trial_id in trial_sets.top_ids:\n training_ids = 
trial_sets.top_ids.copy()\n training_ids.remove(trial_id)\n X_train, y_train = dl_enhanced.load(training_ids, iid=True)\n X_test, y_test = dl_enhanced.load([trial_id], iid=False)\n clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=101,\n max_depth=3, min_child_weight=3, gamma=0.3, subsample=0.9,\n colsample_bytree=0.6, scale_pos_weight=1, reg_alpha=0.01,\n objective='binary:logistic', nthread=data.N_JOBS, random_state=42)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n wrist_enhanced, wrist_clean, fingertip_enhanced = (dl_enhanced.\n load_oxygen(trial_id, y_pred=y_pred, iid=False))\n wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id,\n iid=False)\n wrist_maxim = wrist_maxim.values.flatten()\n wrist_enhanced = wrist_enhanced.values.flatten()\n fingertip_maxim = fingertip_maxim.values.flatten()\n fingertip_enhanced = fingertip_enhanced.values.flatten()\n wrist_clean = wrist_clean.values.flatten()\n for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced,\n fingertip_maxim, fingertip_enhanced, wrist_clean):\n maxim_errors.append(np.abs(np.subtract(oM, oMF)))\n enhanced_errors.append(np.abs(np.subtract(oE, oMF)))\n wristo_errors.append(np.abs(np.subtract(oC, oMF)))\n maxim_errors = np.array(maxim_errors)\n enhanced_errors = np.array(enhanced_errors)\n wristo_errors = np.array(wristo_errors)\n maxim_errors = maxim_errors[~np.isnan(maxim_errors)]\n enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]\n wristo_errors = wristo_errors[~np.isnan(wristo_errors)]\n rmses = [maxim_errors, enhanced_errors, wristo_errors]\n plt.figure(figsize=(4 * 1.2, 2 * 1.2))\n for e in rmses:\n sorted_data = np.sort(e)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.legend(['Baseline', 'Enhanced', 'WristO2'])\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 10.0)\n plt.xlabel('Absolute Error')\n plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')\n tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')\n\n\ndef create_fingertip_cdf():\n THRESHOLD = 2.0\n dl = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name=\n 'enhanced', features='comprehensive')\n fingertip_error = []\n csv_file = open(data.GRAPH_CACHE + 'csv-fingertip.csv', 'w')\n csvwriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL)\n csvwriter.writerow(['reflective', 'transitive'])\n for trial_id in trial_sets.top_ids:\n wrist_oxygen, fingertip_oxygen, transitive_oxygen = dl.load_all_oxygen(\n trial_id)\n for oF, oT in zip(fingertip_oxygen, transitive_oxygen):\n csvwriter.writerow([oF, oT])\n fingertip_error.append(np.square(np.subtract(oF, oT)))\n fingertip_error = np.array(fingertip_error)\n fingertip_error = fingertip_error[~np.isnan(fingertip_error)]\n plt.figure()\n sorted_data = np.sort(fingertip_error)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n plt.ylim(0.0, 1.0)\n plt.xlabel('RMSE')\n plt.savefig(data.GRAPH_CACHE + 'cdf-fingertip.pdf')\n csv_file.close()\n\n\nif __name__ == '__main__':\n visualize_all_classifier_results()\n create_error_cdf()\n", "step-5": "import data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport pandas as pd\nimport csv\n\nfrom matplotlib2tikz import save as tikz_save\n\nimport trial_sets\n\n\ndef print_stats(trial_id, dl):\n wrist_device, _, true_device = dl.load_oxygen(trial_id, iid=False)\n print(\"Length of Dataframe: \" + str(data.get_df_length(wrist_device)))\n\n wrist_oxygen = 
wrist_device.values.flatten()\n true_oxygen = true_device.values.flatten()\n\n sample_count = wrist_oxygen.shape[0]\n wrist_reliable_count = np.count_nonzero(~np.isnan(wrist_oxygen))\n\n print(\"Samples Collected: \" + str(sample_count))\n\n\n algo_percent = (wrist_reliable_count / sample_count) * 100\n print(\"Algorithm marked {} samples, or {:.1f}%, as reliable\".format(wrist_reliable_count, algo_percent))\n\n true_reliable_count = 0\n for o1, o2 in zip(wrist_oxygen, true_oxygen):\n difference = np.abs(np.subtract(o1, o2))\n if difference <= dl.threshold:\n true_reliable_count += 1\n\n actual_precent = (true_reliable_count / sample_count) * 100\n print(\"{}, or {:.1f}%, of labels were within {} of wrist sensor\".format(true_reliable_count, actual_precent, dl.threshold))\n print(\"Positive Labels: \" + str(true_reliable_count))\n\n\ndef visualize_classifier_results(training_ids, test_id, dl, show_classifier=True):\n\n if show_classifier:\n X_train, y_train = dl.load(training_ids, iid=True)\n X_test, y_test = dl.load([test_id], iid=False)\n\n clf = xgb.XGBClassifier(\n learning_rate=0.1,\n n_estimators=101,\n max_depth=3,\n min_child_weight=3,\n gamma=0.3,\n subsample=0.9,\n colsample_bytree=0.6,\n scale_pos_weight=1,\n reg_alpha=0.01,\n objective='binary:logistic',\n nthread=data.N_JOBS,\n random_state=42)\n\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n else:\n y_pred = None\n\n wrist_oxygen, wrist_oxygen_clean, true_oxygen = dl.load_oxygen(test_id, y_pred=y_pred, iid=False)\n\n wrist_oxygen = wrist_oxygen[::5]\n wrist_oxygen_clean = wrist_oxygen_clean[::5]\n true_oxygen = true_oxygen[::5]\n\n if show_classifier:\n graph_df = pd.concat([wrist_oxygen, true_oxygen, wrist_oxygen_clean], axis=1, sort=True)\n else:\n graph_df = pd.concat([wrist_oxygen, true_oxygen], axis=1, sort=True)\n\n assert(wrist_oxygen.shape == true_oxygen.shape)\n assert(graph_df.shape[0] == wrist_oxygen.shape[0])\n\n # plt.figure(figsize=(4 * 1.2, 3 * 1.2))\n\n graph_df.plot.line(figsize=(4 * 1.2, 2 * 1.2))\n plt.xlabel(\"Time (Milliseconds)\")\n plt.ylabel(\"SpO2 (%)\")\n plt.ylim()\n plt.legend(loc='lower left')\n if show_classifier:\n plt.savefig(data.GRAPH_CACHE + 'classifier-{}-{}.pdf'.format(test_id, str(dl)))\n tikz_save(data.LTX_CACHE + 'classifier-{}-{}.tex'.format(test_id, str(dl)))\n else:\n plt.savefig(data.GRAPH_CACHE + 'algos-{}-{}.pdf'.format(test_id, str(dl)))\n\n\ndef print_all_stats():\n dl = data.DataLoader(window_size=100, threshold=1.0, algo_name='enhanced', features='comprehensive')\n for trial_id in trial_sets.top_ids:\n print(\"\\nStats for trial: {}\".format(trial_id))\n print_stats(trial_id, dl)\n\n\ndef visualize_all_classifier_results():\n\n trial_ids = trial_sets.top_ids\n\n dl = data.DataLoader(window_size=100, threshold=2.0, algo_name='enhanced', features='comprehensive')\n\n for trial_id in trial_ids:\n print(\"Trial {}\".format(trial_id))\n training_ids = trial_ids.copy()\n training_ids.remove(trial_id)\n\n visualize_classifier_results(training_ids, trial_id, dl)\n\n\ndef create_error_cdf():\n THRESHOLD = 2.0\n dl_enhanced = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='enhanced', features='comprehensive')\n dl_maxim = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='maxim', features='comprehensive')\n\n maxim_errors = []\n enhanced_errors = []\n wristo_errors = []\n\n for trial_id in trial_sets.top_ids:\n\n training_ids = trial_sets.top_ids.copy()\n training_ids.remove(trial_id)\n X_train, y_train = dl_enhanced.load(training_ids, 
iid=True)\n X_test, y_test = dl_enhanced.load([trial_id], iid=False)\n\n clf = xgb.XGBClassifier(\n learning_rate=0.1,\n n_estimators=101,\n max_depth=3,\n min_child_weight=3,\n gamma=0.3,\n subsample=0.9,\n colsample_bytree=0.6,\n scale_pos_weight=1,\n reg_alpha=0.01,\n objective='binary:logistic',\n nthread=data.N_JOBS,\n random_state=42)\n\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n wrist_enhanced, wrist_clean, fingertip_enhanced = dl_enhanced.load_oxygen(trial_id, y_pred=y_pred, iid=False)\n wrist_maxim, _, fingertip_maxim = dl_maxim.load_oxygen(trial_id, iid=False)\n\n wrist_maxim = wrist_maxim.values.flatten()\n wrist_enhanced = wrist_enhanced.values.flatten()\n fingertip_maxim = fingertip_maxim.values.flatten()\n fingertip_enhanced = fingertip_enhanced.values.flatten()\n wrist_clean = wrist_clean.values.flatten()\n\n for oM, oE, oMF, oEF, oC in zip(wrist_maxim, wrist_enhanced, fingertip_maxim, fingertip_enhanced, wrist_clean):\n maxim_errors.append(np.abs(np.subtract(oM, oMF)))\n enhanced_errors.append(np.abs(np.subtract(oE, oMF)))\n wristo_errors.append(np.abs(np.subtract(oC, oMF)))\n\n maxim_errors = np.array(maxim_errors)\n enhanced_errors = np.array(enhanced_errors)\n wristo_errors = np.array(wristo_errors)\n maxim_errors = maxim_errors[~np.isnan(maxim_errors)]\n enhanced_errors = enhanced_errors[~np.isnan(enhanced_errors)]\n wristo_errors = wristo_errors[~np.isnan(wristo_errors)]\n rmses = [maxim_errors, enhanced_errors, wristo_errors]\n\n plt.figure(figsize=(4 * 1.2, 2 * 1.2))\n\n for e in rmses:\n sorted_data = np.sort(e)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n\n plt.legend(['Baseline', 'Enhanced', 'WristO2'])\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 10.0)\n plt.xlabel('Absolute Error')\n\n plt.savefig(data.GRAPH_CACHE + 'cdf-error-algo.pdf')\n tikz_save(data.LTX_CACHE + 'cdf-error-algo.tex')\n\n\ndef create_fingertip_cdf():\n THRESHOLD = 2.0\n dl = data.DataLoader(window_size=100, threshold=THRESHOLD, algo_name='enhanced', features='comprehensive')\n\n fingertip_error = []\n\n csv_file = open(data.GRAPH_CACHE + 'csv-fingertip.csv', 'w')\n\n csvwriter = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\n csvwriter.writerow(['reflective', 'transitive'])\n\n for trial_id in trial_sets.top_ids:\n wrist_oxygen, fingertip_oxygen, transitive_oxygen = dl.load_all_oxygen(trial_id)\n\n for oF, oT in zip(fingertip_oxygen, transitive_oxygen):\n csvwriter.writerow([oF, oT])\n fingertip_error.append(np.square(np.subtract(oF, oT)))\n\n fingertip_error = np.array(fingertip_error)\n fingertip_error = fingertip_error[~np.isnan(fingertip_error)]\n\n plt.figure()\n\n\n sorted_data = np.sort(fingertip_error)\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\n plt.plot(sorted_data, yvals)\n\n # plt.legend(['Baseline', 'Enhanced'])\n plt.ylim(0.0, 1.0)\n plt.xlabel('RMSE')\n\n plt.savefig(data.GRAPH_CACHE + 'cdf-fingertip.pdf')\n\n csv_file.close()\n\n\nif __name__ == '__main__':\n # print_all_stats()\n visualize_all_classifier_results()\n create_error_cdf()\n # create_fingertip_cdf()\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
from dagster import job, op


@op
def do_something():
    return "foo"


@job
def do_it_all():
    do_something()
normal
{ "blob_id": "53cf6e97c3b71b1063d5b6bce5aa444933b69809", "index": 3229, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@job\ndef do_it_all():\n do_something()\n", "step-3": "<mask token>\n\n\n@op\ndef do_something():\n return 'foo'\n\n\n@job\ndef do_it_all():\n do_something()\n", "step-4": "from dagster import job, op\n\n\n@op\ndef do_something():\n return 'foo'\n\n\n@job\ndef do_it_all():\n do_something()\n", "step-5": "from dagster import job, op\n\n\n@op\ndef do_something():\n return \"foo\"\n\n\n@job\ndef do_it_all():\n do_something()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data as td
import torchvision as tv
import pandas as pd
from PIL import Image
from matplotlib import pyplot as plt
from utils import imshow, NNRegressor


class DnCNN(NNRegressor):

    def __init__(self, D, C=64):
        super(DnCNN, self).__init__()
        self.D = D

        # convolution layers
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv2d(3, C, 3, padding=1))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])
        self.conv.append(nn.Conv2d(C, 3, 3, padding=1))
        # apply He's initialization
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')

        # batch normalization
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        D = self.D
        h = F.relu(self.conv[0](x))
        for i in range(D):
            h = F.relu(self.bn[i](self.conv[i+1](h)))
        y = self.conv[D+1](h) + x
        return y


class UDnCNN(NNRegressor):

    def __init__(self, D, C=64):
        super(UDnCNN, self).__init__()
        self.D = D

        # convolution layers
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv2d(3, C, 3, padding=1))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])
        self.conv.append(nn.Conv2d(C, 3, 3, padding=1))
        # apply He's initialization
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')

        # batch normalization
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        D = self.D
        h = F.relu(self.conv[0](x))
        h_buff = []
        idx_buff = []
        shape_buff = []
        for i in range(D//2-1):
            shape_buff.append(h.shape)
            h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),
                                  kernel_size=(2, 2), return_indices=True)
            h_buff.append(h)
            idx_buff.append(idx)
        for i in range(D//2-1, D//2+1):
            h = F.relu(self.bn[i](self.conv[i+1](h)))
        for i in range(D//2+1, D):
            j = i - (D // 2 + 1) + 1
            h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),
                               idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])
        y = self.conv[D+1](h) + x
        return y


class DUDnCNN(NNRegressor):

    def __init__(self, D, C=64):
        super(DUDnCNN, self).__init__()
        self.D = D

        # compute k(max_pool) and l(max_unpool)
        k = [0]
        k.extend([i for i in range(D//2)])
        k.extend([k[-1] for _ in range(D//2, D+1)])
        l = [0 for _ in range(D//2+1)]
        l.extend([i for i in range(D+1-(D//2+1))])
        l.append(l[-1])

        # holes and dilations for convolution layers
        holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]
        dilations = [i+1 for i in holes]

        # convolution layers
        self.conv = nn.ModuleList()
        self.conv.append(
            nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],
                                    dilation=dilations[i+1]) for i in range(D)])
        self.conv.append(
            nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))
        # apply He's initialization
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')

        # batch normalization
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        D = self.D
        h = F.relu(self.conv[0](x))
        h_buff = []

        for i in range(D//2 - 1):
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))
            h_buff.append(h)

        for i in range(D//2 - 1, D//2 + 1):
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))

        for i in range(D//2 + 1, D):
            j = i - (D//2 + 1) + 1
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))

        y = self.conv[D+1](h) + x
        return y
normal
{ "blob_id": "9c60d82d42716abb036dc7297a2dca66f0508984", "index": 7626, "step-1": "<mask token>\n\n\nclass UDnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-2": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 
1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-3": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 
1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-4": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n 
super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n", "step-5": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n y = self.conv[D+1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n 
nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D//2-1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D//2-1, D//2+1):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n for i in range(D//2+1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),\n idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])\n y = self.conv[D+1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n\n # compute k(max_pool) and l(max_unpool)\n k = [0]\n k.extend([i for i in range(D//2)])\n k.extend([k[-1] for _ in range(D//2, D+1)])\n l = [0 for _ in range(D//2+1)]\n l.extend([i for i in range(D+1-(D//2+1))])\n l.append(l[-1])\n\n # holes and dilations for convolution layers\n holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]\n dilations = [i+1 for i in holes]\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(\n nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],\n dilation=dilations[i+1]) for i in range(D)])\n self.conv.append(\n nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n\n for i in range(D//2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n\n for i in range(D//2 - 1, D//2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n for i in range(D//2 + 1, D):\n j = i - (D//2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n y = self.conv[D+1](h) + x\n return y\n", "step-ids": [ 4, 7, 8, 10, 11 ] }
[ 4, 7, 8, 10, 11 ]
#!/usr/bin/python
from Tkinter import *

root = Tk()

root.title("Simple Graph")

root.resizable(0,0)

points = []

spline = 0

tag1 = "theline"

def point(event):
    c.create_oval(event.x, event.y, event.x+1, event.y+1, fill="black", width="10.0")
    points.append(event.x)
    points.append(event.y)
    print(event.x)
    print(event.y)
    return points

def canxy(event):
    print("Getting the coordinates")
    print event.x, event.y
    c.create_oval(event.x, event.y, event.x+1, event.y+1, fill="red", width="20.0")

def graph(event):
    global theline
    c.create_line(points, tags="theline")

def toggle(event):
    global spline
    if spline == 0:
        c.itemconfigure(tag1, smooth=1)
        spline = 1
    elif spline == 1:
        c.itemconfigure(tag1, smooth=0)
        spline = 0
    return spline


c = Canvas(root, bg="white", width=300, height= 300)

c.configure(cursor="crosshair")

c.pack()

c.bind("<Button-1>", point)

#c.bind("<Button-3>", graph)
c.bind("<Button-3>", canxy)
#c.bind("<Button-2>", toggle)

root.mainloop()
normal
{ "blob_id": "d88485e37d4df4cb0c8d79124d4c9c9ba18d124e", "index": 9074, "step-1": "#!/usr/bin/python\nfrom Tkinter import *\n\nroot = Tk()\n\nroot.title(\"Simple Graph\")\n\nroot.resizable(0,0)\n\npoints = []\n\nspline = 0\n\ntag1 = \"theline\"\n\ndef point(event):\n\tc.create_oval(event.x, event.y, event.x+1, event.y+1, fill=\"black\", width=\"10.0\")\n\tpoints.append(event.x)\n\tpoints.append(event.y)\n\tprint(event.x)\n\tprint(event.y)\n\treturn points\n\ndef canxy(event):\n\tprint(\"Getting the coordinates\")\n\tprint event.x, event.y\n\tc.create_oval(event.x, event.y, event.x+1, event.y+1, fill=\"red\", width=\"20.0\")\n\ndef graph(event):\n\tglobal theline\n\tc.create_line(points, tags=\"theline\")\n\t\n\ndef toggle(event):\n\tglobal spline\n\tif spline == 0:\n\t\tc.itemconfigure(tag1, smooth=1)\n\t\tspline = 1\n\telif spline == 1:\n\t\tc.itemconfigure(tag1, smooth=0)\n\t\tspline = 0\n\treturn spline\n\n\nc = Canvas(root, bg=\"white\", width=300, height= 300)\n\nc.configure(cursor=\"crosshair\")\n\nc.pack()\n\nc.bind(\"<Button-1>\", point)\n\n#c.bind(\"<Button-3>\", graph)\nc.bind(\"<Button-3>\", canxy)\n#c.bind(\"<Button-2>\", toggle)\n\nroot.mainloop()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
txt = './KF_neko.txt.mecab'
mapData = {}
listData = []
with open('./KF31.txt', 'w') as writeFile:
    with open(txt, 'r') as readFile:
        for text in readFile:
            # print(text)
            # \tで区切って先頭だけ見る
            listData = text.split('\t')
            # 表層形
            surface = listData[0]
            # EOSが入ってたら消す
            if surface == 'EOS\n':
                surface = ''
            # print(surface)
            # 表層形以外をバラす
            splitted = listData[-1].split(',')
            # EOSが入ってたら消す
            if splitted == 'EOS\n':
                continue
            else:
                # 品詞
                pos = splitted[0]
                if pos in ('動詞'):
                    dousiSurface = surface
                    writeFile.write(dousiSurface+'\n')
normal
{ "blob_id": "778ee9a0ea7f57535b4de88a38cd741f2d46e092", "index": 6966, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n listData = text.split('\\t')\n surface = listData[0]\n if surface == 'EOS\\n':\n surface = ''\n splitted = listData[-1].split(',')\n if splitted == 'EOS\\n':\n continue\n else:\n pos = splitted[0]\n if pos in '動詞':\n dousiSurface = surface\n writeFile.write(dousiSurface + '\\n')\n", "step-3": "txt = './KF_neko.txt.mecab'\nmapData = {}\nlistData = []\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n listData = text.split('\\t')\n surface = listData[0]\n if surface == 'EOS\\n':\n surface = ''\n splitted = listData[-1].split(',')\n if splitted == 'EOS\\n':\n continue\n else:\n pos = splitted[0]\n if pos in '動詞':\n dousiSurface = surface\n writeFile.write(dousiSurface + '\\n')\n", "step-4": "txt = './KF_neko.txt.mecab'\nmapData = {}\nlistData = []\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n # print(text)\n # \\tで区切って先頭だけ見る\n listData = text.split('\\t')\n # 表層形\n surface = listData[0]\n # EOSが入ってたら消す\n if surface == 'EOS\\n':\n surface = ''\n # print(surface)\n # 表層形以外をバラす\n splitted = listData[-1].split(',')\n # EOSが入ってたら消す\n if splitted == 'EOS\\n':\n continue\n else:\n # 品詞\n pos = splitted[0]\n if pos in ('動詞'):\n dousiSurface = surface\n writeFile.write(dousiSurface+'\\n')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
n, m = map(int, input().split())
li = list(map(int, input().split()))

max = 0
for i in range(0, n):
    for j in range(i+1, n):
        for k in range(j+1, n):
            tmp = li[i] + li[j] + li[k]
            if(tmp <= m and max < tmp):
                max = tmp

print(max)
normal
{ "blob_id": "83d0a32ef2d365d17caa9d311c367ed5828559ac", "index": 4153, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(0, n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n tmp = li[i] + li[j] + li[k]\n if tmp <= m and max < tmp:\n max = tmp\nprint(max)\n", "step-3": "n, m = map(int, input().split())\nli = list(map(int, input().split()))\nmax = 0\nfor i in range(0, n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n tmp = li[i] + li[j] + li[k]\n if tmp <= m and max < tmp:\n max = tmp\nprint(max)\n", "step-4": "n, m = map(int, input().split())\nli = list(map(int, input().split()))\n\nmax = 0\nfor i in range(0, n):\n for j in range(i+1, n):\n for k in range(j+1, n):\n tmp = li[i] + li[j] + li[k]\n if(tmp <= m and max < tmp):\n max = tmp\n\nprint(max)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding=UTF-8
"""
View for managing accounts
"""

from django.contrib import messages
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django import forms
from athena.core import render_to_response
from athena.users.models import User
from athena.users import must_be_admin


def klist(**kwargs):
    kwargs.update({
        'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],
        'admins': User.objects.filter(status=2),
    })
    return kwargs


@must_be_admin
def list(request):
    return render_to_response('radmin/manage_accounts_list.html', request, **klist())


@must_be_admin
def account(request, account_id):
    try:
        acc = User.objects.get(id=int(account_id))
    except:
        raise Http404

    class AccountBaseForm(forms.ModelForm):
        class Meta:
            model = User
            fields = ['name', 'surname', 'number']
            widgets = {
                'name': forms.TextInput(),
                'surname': forms.TextInput(),
            }

    if request.method == 'POST':
        form = AccountBaseForm(request.POST, instance=acc)

        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, u'Zapisano.')

    else:
        form = AccountBaseForm(instance=acc)

    if acc.status != 0:
        return render_to_response('radmin/manage_accounts_acc.html', request, **klist(
            account=acc,
            selected_user_id=acc.id,
            form=form))
    else:
        return render_to_response('radmin/manage_accounts_students_acc.html', request,
            account=acc,
            selected_user_id=acc.id,
            form=form,
            page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))


@must_be_admin
def reset_pwd(request, account_id):
    if request.method != 'POST':
        return HttpResponse(status=400)

    try:
        acc = User.objects.get(id=int(account_id))
    except:
        raise Http404

    from random import choice
    randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])

    acc.set_password(randompass)

    messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (randompass, ))

    return redirect('/admin/accounts/%s/' % (acc.id, ))


@must_be_admin
def su(request, account_id):
    """Login as this user"""
    if request.method != 'POST':
        return HttpResponse(status=400)

    try:
        acc = User.objects.get(id=int(account_id))
    except:
        raise Http404

    request.logout()
    request.login(acc.login)

    messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))

    return redirect('/')


@must_be_admin
def delete(request, account_id):
    if request.method != 'POST':
        return HttpResponse(status=400)

    try:
        acc = User.objects.get(id=int(account_id))
    except:
        raise Http404

    if acc.login in ('[email protected]', '[email protected]', '[email protected]'):
        messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')
        return redirect('/admin/accounts/%s/' % (acc.id, ))

    if acc.status == 1:
        # This is a teacher. You should reparent all of it's tests
        # and groups to user to [email protected]
        pass

    messages.add_message(request, messages.SUCCESS, u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))

    acc.delete()

    return redirect('/admin/accounts/')


@must_be_admin
def create(request):

    class NewAccountForm(forms.Form):
        _CHOICE = ((1, 'Nauczyciel'), (2, 'Adminstrator'))
        login = forms.EmailField(label=u'E-mail')
        name = forms.CharField(label=u'Imię', required=False)
        surname = forms.CharField(label=u'Nazwisko', required=False)
        status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')

    if request.method == 'POST':
        form = NewAccountForm(request.POST)

        if form.is_valid():

            # grab a random password
            from random import choice
            randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])

            u = User(login=form.cleaned_data['login'],
                     name=form.cleaned_data['name'],
                     surname=form.cleaned_data['surname'],
                     status=form.cleaned_data['status'])
            u.save()
            u.set_password(randompass)

            messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))

            return redirect('/admin/accounts/%s/' % (u.id, ))

    else:
        form = NewAccountForm()

    return render_to_response('radmin/manage_accounts_add.html', request, **klist(
        selected_user_id='create',
        form=form))


from django.core.paginator import Paginator


@must_be_admin
def view_students(request, page='1'):
    page = int(page)
    students = User.objects.filter(status=0).order_by('surname', 'name')
    students = [x for x in students if not x.is_demo()]
    p = Paginator(students, 30)

    cpage = p.page(page)

    return render_to_response('radmin/manage_accounts_students_list.html', request,
        page=cpage)
normal
{ "blob_id": "a01ca49c3fa8ea76de2880c1b04bf15ccd341edd", "index": 924, "step-1": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n<mask token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n", "step-2": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n<mask token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n 
messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n", "step-3": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n 
messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n", "step-4": "<mask token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<mask token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n", "step-5": "# coding=UTF-8\n\"\"\"\nView for managing accounts\n\"\"\"\n\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django import forms\nfrom athena.core import render_to_response\nfrom athena.users.models import User\nfrom athena.users import must_be_admin\n\n\ndef klist(**kwargs):\n kwargs.update({\n 'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],\n 'admins': User.objects.filter(status=2),\n })\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request, **klist())\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n class AccountBaseForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {\n 'name': forms.TextInput(),\n 'surname': forms.TextInput(),\n }\n\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n\n else:\n form = AccountBaseForm(instance=acc)\n\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html', request, **klist(\n account=acc,\n selected_user_id=acc.id,\n form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html', request,\n account=acc,\n selected_user_id=acc.id,\n form=form,\n page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n acc.set_password(randompass)\n\n messages.add_message(request, messages.SUCCESS, 
u'Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n request.logout()\n request.login(acc.login)\n\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))\n\n return redirect('/')\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n if acc.login in ('[email protected]', '[email protected]', '[email protected]'):\n messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n if acc.status == 1:\n # This is a teacher. You should reparent all of it's tests\n # and groups to user to [email protected]\n pass\n\n messages.add_message(request, messages.SUCCESS, u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n\n acc.delete()\n\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n class NewAccountForm(forms.Form):\n _CHOICE = ((1, 'Nauczyciel'), (2, 'Adminstrator'))\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False) \n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n\n if form.is_valid():\n\n # grab a random password\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n u = User(login=form.cleaned_data['login'],\n name=form.cleaned_data['name'],\n surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n\n messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (u.id, ))\n\n else:\n form = NewAccountForm()\n\n return render_to_response('radmin/manage_accounts_add.html', request, **klist(\n selected_user_id='create',\n form=form))\n\nfrom django.core.paginator import Paginator\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n\n cpage = p.page(page)\n\n return render_to_response('radmin/manage_accounts_students_list.html', request,\n page=cpage)", "step-ids": [ 3, 6, 7, 8, 10 ] }
[ 3, 6, 7, 8, 10 ]
#!/usr/bin/env python

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.
import scipy.ndimage as nd

import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils


logger = log_utils.get_logger("reward_learning/split_data_lmdb")


def dict_from_dataflow_generator(df):
    for sample in df.get_data():
        yield sample[0]


def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,
                       batch_size, shuffle, serialization_name, compression, compression_arg,
                       max_num_samples=None):
    data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)
    data_dict_df.reset_state()

    assert(split_ratio1 > 0)
    assert(split_ratio1 < 1)

    num_samples = data_dict_df.size()
    if max_num_samples is not None and max_num_samples > 0:
        num_samples = min(num_samples, max_num_samples)
    num_batches = num_samples // batch_size
    num_batches1 = round(split_ratio1 * num_samples) // batch_size
    num_samples1 = num_batches1 * batch_size
    num_batches2 = num_batches - num_batches1
    num_samples2 = num_batches2 * batch_size
    if num_samples1 <= 0 or num_samples2 <= 0:
        import sys
        sys.stderr.write("Data split will result in empty data set\n")
        sys.exit(1)

    logger.info("Splitting {} samples into {} train and {} test samples".format(num_samples, num_samples1, num_samples2))
    if num_samples > num_samples1 + num_samples2:
        logger.warn("Dropping {} samples from input dataset".format(num_samples - num_samples1 - num_samples2))

    fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)
    with logged_time_measurement(logger, "Writing train dataset to {} ...".format(lmdb_output_path1), log_start=True):
        tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,
                                                          write_frequency=10,
                                                          serialization_name=serialization_name,
                                                          compression=compression,
                                                          compression_arg=compression_arg)

    fixed_size_df.set_size(num_samples2)
    with logged_time_measurement(logger, "Writing test dataset to {} ...".format(lmdb_output_path2), log_start=True):
        tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,
                                                          write_frequency=10,
                                                          serialization_name=serialization_name,
                                                          compression=compression,
                                                          compression_arg=compression_arg,
                                                          reset_df_state=False)

    logger.info("Tagging as train and test")
    with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
        lmdb_db.put_item("__train__", msgpack_utils.dumps(True))
    with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
        lmdb_db.put_item("__test__", msgpack_utils.dumps(True))

    lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
    assert(lmdb_df.size() == num_samples1)
    lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
    assert(lmdb_df.size() == num_samples2)


def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
    with logged_time_measurement(logger, "Computing data statistics for {}".format(lmdb_path), log_start=True):
        lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
        lmdb_df.reset_state()
        data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))

    # TODO: Hack to get rid of float64 in HDF5 dataset
    for key in data_stats_dict:
        for key2 in data_stats_dict[key]:
            if data_stats_dict[key][key2] is not None:
                data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)

    serializer = serialization.get_serializer_by_name(serialization_name)
    logger.info("Writing data statistics to {}".format(lmdb_path))
    with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
        data_stats_dump = serializer.dumps(data_stats_dict)
        lmdb_db.put_item("__stats__", data_stats_dump)


def run(args):
    split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,
                       args.split_ratio1, args.batch_size,
                       args.shuffle, args.serialization,
                       args.compression, args.compression_arg,
                       args.max_num_samples)

    if args.compute_stats:
        compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)
        compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)


def main():
    np.set_printoptions(threshold=5)

    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-v', '--verbose', action='count',
                        default=0, help='Set verbosity level.')
    parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')
    parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')
    parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')
    parser.add_argument('--shuffle', type=argparse_bool, default=True)
    parser.add_argument('--serialization', type=str, default="pickle")
    parser.add_argument('--compression', type=str, default="lz4")
    parser.add_argument('--compression-arg', type=str)
    parser.add_argument('--split-ratio1', default=0.8, type=float, help="Ratio of data to write to output path 1")
    parser.add_argument('--batch-size', type=int, default=512)
    parser.add_argument('--compute-stats', type=argparse_bool, default=True)
    parser.add_argument('--max-num-samples', type=int)

    args = parser.parse_args()

    run(args)


if __name__ == '__main__':
    main()
normal
{ "blob_id": "a283fd1e4098ea8bb3cc3580438c90e5932ba22f", "index": 5852, "step-1": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', 
data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n 
lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = 
tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n 
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport scipy.ndimage as nd\nimport argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n 
format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.\nimport scipy.ndimage as nd\n\nimport 
argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\n\n\nlogger = log_utils.get_logger(\"reward_learning/split_data_lmdb\")\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,\n batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)\n data_dict_df.reset_state()\n\n assert(split_ratio1 > 0)\n assert(split_ratio1 < 1)\n\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write(\"Data split will result in empty data set\\n\")\n sys.exit(1)\n\n logger.info(\"Splitting {} samples into {} train and {} test samples\".format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn(\"Dropping {} samples from input dataset\".format(num_samples - num_samples1 - num_samples2))\n\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)\n with logged_time_measurement(logger, \"Writing train dataset to {} ...\".format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg)\n\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, \"Writing test dataset to {} ...\".format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg,\n reset_df_state=False)\n\n logger.info(\"Tagging as train and test\")\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__train__\", msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__test__\", msgpack_utils.dumps(True))\n\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert(lmdb_df.size() == num_samples1)\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert(lmdb_df.size() == num_samples2)\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, \"Computing data statistics for {}\".format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))\n\n # TODO: Hack to get rid of float64 in HDF5 dataset\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = 
np.asarray(data_stats_dict[key][key2], dtype=np.float32)\n\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info(\"Writing data statistics to {}\".format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item(\"__stats__\", data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,\n args.split_ratio1, args.batch_size,\n args.shuffle, args.serialization,\n args.compression, args.compression_arg,\n args.max_num_samples)\n\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count',\n default=0, help='Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default=\"pickle\")\n parser.add_argument('--compression', type=str, default=\"lz4\")\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\"Ratio of data to write to output path 1\")\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n\n args = parser.parse_args()\n\n run(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
# -*- encoding: utf-8 -*- """ views: vistas sistema recomendador @author Camilo Ramírez @contact [email protected] [email protected] @camilortte on Twitter @copyright Copyright 2014-2015, RecomendadorUD @license GPL @date 2014-10-10 @satus Pre-Alpha @version= 0..215 """ from django.views.generic import TemplateView from apps.recommender_system.models import EstablecimientosRecommender from apps.establishment_system.models import Establecimiento from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from apps.externals.djangoratings.models import Vote class RecomendacionView(TemplateView): template_name = 'recommender/recomendacion.html' def get_context_data(self, **kwargs): context = super(RecomendacionView, self).get_context_data(**kwargs) #context['now'] = timezone.now() context['recomendaciones']=self.obtener_recomendacion(self.request.user) return context def obtener_recomendacion(self,user): print "Prediciendo recomendacion" recomendador_instance=EstablecimientosRecommender() recomendaciones=recomendador_instance.storage.get_recommendations_for_user(user) print recomendaciones if recomendaciones: print "Recomendando" result=[] for recomendacion in recomendaciones: result.append(recomendacion.object) recomendaciones=result recomendaciones_leng=len(recomendaciones) if recomendaciones_leng <10: query=Establecimiento.objects.all().order_by('-rating_score') for establecimiento in query: if establecimiento not in recomendaciones: if not Vote.objects.filter(object_id=establecimiento.id,user=user.id): recomendaciones.append(establecimiento) if len(recomendaciones)>=10: break else: query=Establecimiento.objects.all().order_by('-rating_score') for establecimiento in query: if establecimiento not in recomendaciones: if not Vote.objects.filter(object_id=establecimiento.id,user=user.id): recomendaciones.append(establecimiento) if len(recomendaciones)>=10: print "Se completo la lista de 10 recomendaciones" break print "No se encontraron recomendaciones" return recomendaciones @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(RecomendacionView, self).dispatch(*args, **kwargs)
normal
{ "blob_id": "c6cbd4d18363f00b73fac873ba45d6063bee7e64", "index": 3074, "step-1": "# -*- encoding: utf-8 -*-\n\"\"\"\n \n views: vistas sistema recomendador\n\n @author Camilo Ramírez\n @contact [email protected] \n [email protected]\n @camilortte on Twitter\n @copyright Copyright 2014-2015, RecomendadorUD\n @license GPL\n @date 2014-10-10\n @satus Pre-Alpha\n @version= 0..215\n\n\n\"\"\"\nfrom django.views.generic import TemplateView\nfrom apps.recommender_system.models import EstablecimientosRecommender\nfrom apps.establishment_system.models import Establecimiento\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom apps.externals.djangoratings.models import Vote\n\n\nclass RecomendacionView(TemplateView):\n template_name = 'recommender/recomendacion.html' \n\n def get_context_data(self, **kwargs): \n context = super(RecomendacionView, self).get_context_data(**kwargs)\n #context['now'] = timezone.now()\n context['recomendaciones']=self.obtener_recomendacion(self.request.user)\n return context\n\n def obtener_recomendacion(self,user):\n print \"Prediciendo recomendacion\"\n recomendador_instance=EstablecimientosRecommender()\n recomendaciones=recomendador_instance.storage.get_recommendations_for_user(user)\n print recomendaciones\n if recomendaciones:\n print \"Recomendando\"\n result=[]\n for recomendacion in recomendaciones:\n result.append(recomendacion.object)\n recomendaciones=result\n\n recomendaciones_leng=len(recomendaciones)\n if recomendaciones_leng <10:\n query=Establecimiento.objects.all().order_by('-rating_score')\n for establecimiento in query:\n if establecimiento not in recomendaciones:\n if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):\n recomendaciones.append(establecimiento)\n if len(recomendaciones)>=10:\n break\n \n else:\n query=Establecimiento.objects.all().order_by('-rating_score')\n for establecimiento in query:\n if establecimiento not in recomendaciones:\n if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):\n recomendaciones.append(establecimiento)\n if len(recomendaciones)>=10:\n print \"Se completo la lista de 10 recomendaciones\"\n break\n print \"No se encontraron recomendaciones\"\n return recomendaciones\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(RecomendacionView, self).dispatch(*args, **kwargs)\n\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# -coding: UTF-8 -*-
# @Time : 2020/06/24 20:01
# @Author: Liangping_Chen
# @E-mail: [email protected]

import requests
def http_request(url,data,token=None,method='post'):
    header = {'X-Lemonban-Media-Type': 'lemonban.v2',
              'Authorization':token}
    #判断是get请求还是post请求
    if method=='get':
        # 发起注册&登录
        result = requests.get(url, json=data, headers=header)
    else:
        result = requests.post(url, json=data, headers=header)

    return result.json()#return返回指定的结果
if __name__ == '__main__':

    login_url='http://120.78.128.25:8766/futureloan/member/login'
    login_data={'mobile_phone':13665929730,'pwd':'12345678'}
    response=http_request(login_url,login_data)
    print('登录的结果是:{}'.format(response))

    #充值
    token=response['data']['token_info']['token']
    rec_url='http://120.78.128.25:8766/futureloan/member/recharge'
    rec_data = {'member_id': 200170, 'amount': 123456}
    print(http_request(rec_url,rec_data,"bearer "+token))
normal
{ "blob_id": "dd7c7fa6493a43988e1c8079797f6ff9b4d239dd", "index": 4672, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n", "step-4": "import requests\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n", "step-5": "# -coding: UTF-8 -*-\n# @Time : 2020/06/24 20:01\n# @Author: Liangping_Chen\n# @E-mail: [email protected]\n\nimport requests\ndef http_request(url,data,token=None,method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2',\n 'Authorization':token}\n #判断是get请求还是post请求\n if method=='get':\n # 发起注册&登录\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n\n return result.json()#return返回指定的结果\nif __name__ == '__main__':\n\n login_url='http://120.78.128.25:8766/futureloan/member/login'\n login_data={'mobile_phone':13665929730,'pwd':'12345678'}\n response=http_request(login_url,login_data)\n print('登录的结果是:{}'.format(response))\n\n #充值\n token=response['data']['token_info']['token']\n rec_url='http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url,rec_data,\"bearer \"+token))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
lista = [2, 3.2, 4, 52, 6.25]
s = sum(lista)
print(s)
normal
{ "blob_id": "05aa8eac846154024d25d639da565135e41403c2", "index": 9611, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(s)\n", "step-3": "lista = [2, 3.2, 4, 52, 6.25]\ns = sum(lista)\nprint(s)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
'''
    Statistics models module. This module contains the database models for the
    Statistics class and the StatisticsCategory class.

    @author Hubert Ngu
    @author Jason Hou
'''

from django.db import models

class Statistics(models.Model):
    '''
    Statistics model class. This represents a single tuple in the
    statitics_generator_statistics table in the database.
    '''
    number_surveys = models.IntegerField()
    number_listings = models.IntegerField()
    number_buyer_surveys = models.IntegerField()
    number_seller_surveys = models.IntegerField()
    number_buyer_listings = models.IntegerField()
    number_seller_listings = models.IntegerField()
    average_transaction_amount = models.FloatField()
    buyer_transaction_amount = models.FloatField()
    seller_transaction_amount = models.FloatField()
    successful_transaction_amount = models.FloatField()
    average_transaction_time = models.IntegerField()
    buyer_transaction_success_rate = models.FloatField()
    seller_transaction_success_rate = models.FloatField()
    total_transaction_success_rate = models.FloatField()

class StatisticsCategory(models.Model):
    '''
    StatisticsCategory model class. This represents a single tuple in the
    statitics_generator_statisticscategory table in the database.
    '''
    statistics_id = models.IntegerField()
    category = models.CharField(max_length=30)
    survey_count = models.IntegerField()
    buyer_count = models.IntegerField()
    seller_count = models.IntegerField()
    amount = models.IntegerField()
normal
{ "blob_id": "728f9402b3ce4b297be82b3ba1a17c4180ac7c0d", "index": 8839, "step-1": "<mask token>\n\n\nclass Statistics(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n", "step-2": "<mask token>\n\n\nclass Statistics(models.Model):\n <mask token>\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n", "step-3": "<mask token>\n\n\nclass Statistics(models.Model):\n \"\"\"\n\tStatistics model class. This represents a single tuple in the\n\tstatitics_generator_statistics table in the database.\n\t\"\"\"\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n", "step-4": "<mask token>\nfrom django.db import models\n\n\nclass Statistics(models.Model):\n \"\"\"\n\tStatistics model class. 
This represents a single tuple in the\n\tstatitics_generator_statistics table in the database.\n\t\"\"\"\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n", "step-5": "'''\r\n Statistics models module. This module contains the database models for the\r\n Statistics class and the StatisticsCategory class.\r\n\r\n @author Hubert Ngu\r\n @author Jason Hou\r\n'''\r\n\r\nfrom django.db import models\r\n\r\nclass Statistics(models.Model):\r\n\t'''\r\n\tStatistics model class. This represents a single tuple in the\r\n\tstatitics_generator_statistics table in the database.\r\n\t'''\r\n\tnumber_surveys = models.IntegerField()\r\n\tnumber_listings = models.IntegerField()\r\n\tnumber_buyer_surveys = models.IntegerField()\r\n\tnumber_seller_surveys = models.IntegerField()\r\n\tnumber_buyer_listings = models.IntegerField()\r\n\tnumber_seller_listings = models.IntegerField()\r\n\taverage_transaction_amount = models.FloatField()\r\n\tbuyer_transaction_amount = models.FloatField()\r\n\tseller_transaction_amount = models.FloatField()\r\n\tsuccessful_transaction_amount = models.FloatField()\r\n\taverage_transaction_time = models.IntegerField()\r\n\tbuyer_transaction_success_rate = models.FloatField()\r\n\tseller_transaction_success_rate = models.FloatField()\r\n\ttotal_transaction_success_rate = models.FloatField()\r\n\r\nclass StatisticsCategory(models.Model):\r\n\t'''\r\n\tStatisticsCategory model class. This represents a single tuple in the\r\n\tstatitics_generator_statisticscategory table in the database.\r\n\t'''\r\n\tstatistics_id = models.IntegerField()\r\n\tcategory = models.CharField(max_length=30)\r\n\tsurvey_count = models.IntegerField()\r\n\tbuyer_count = models.IntegerField()\r\n\tseller_count = models.IntegerField()\r\n\tamount = models.IntegerField()", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)

def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
    layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
    if batch_norm:
        layer = tf.layers.batch_normalization(layer, training=is_training)
    layer = tf.nn.relu(layer)
    return layer

def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
    if layer_depth % 3 == 0:
        strides = 2
    else:
        strides = 1
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)
    if batch_norm:
        conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
    conv_layer = tf.nn.relu(conv_layer)
    return conv_layer


num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True

inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)

layer = inputs
for layer_i in range(1, 1+layer_num):
    layer = conv_layer(layer, layer_i, batch_norm, is_training)

orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)

logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
tf.summary.scalar('conv_loss',model_loss)

if batch_norm:
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        #train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
        #train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
    train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
    #train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)
    #train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    merged = tf.summary.merge_all()
    if batch_norm:
        logdir = "mnist/conv/SGD_batchnorm"
    else:
        logdir = "mnist/conv/SGD_no_batchnorm"
    writer = tf.summary.FileWriter(logdir, sess.graph)
    sess.run(tf.global_variables_initializer())
    for batch_i in range(num_batches):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        _,summary = sess.run([train_opt,merged], {inputs: batch_xs, labels: batch_ys, is_training: True})
        writer.add_summary(summary, batch_i)

        if batch_i % 500 == 0:
            loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False})
            print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
        elif batch_i % 100 == 0:
            loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
            print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

    acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels,is_training: False})
    print('Final validation accuracy: {:>3.5f}'.format(acc))

    acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels,is_training: False})
    print('Final test accuracy: {:>3.5f}'.format(acc))
normal
{ "blob_id": "17b3f51779bda5a48c4d77c35d6bbdd2aadb13cd", "index": 1432, "step-1": "<mask token>\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\n<mask token>\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\n<mask token>\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\n<mask token>\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n", "step-3": "<mask token>\nmnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 
4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\ninputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\nlayer = inputs\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\norig_shape = layer.get_shape().as_list()\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *\n orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=labels))\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n", "step-4": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n 
conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\ninputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\nlayer = inputs\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\norig_shape = layer.get_shape().as_list()\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *\n orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=labels))\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n", "step-5": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True, reshape=False)\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n\tif layer_depth % 3 == 0:\n\t strides = 2\n\telse:\n\t\tstrides = 1\n\tconv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)\n\tif batch_norm:\n\t\tconv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n\tconv_layer = tf.nn.relu(conv_layer)\n\treturn conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\n\ninputs 
= tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\n\nlayer = inputs\nfor layer_i in range(1, 1+layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\n\norig_shape = layer.get_shape().as_list()\n\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\n\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\ntf.summary.scalar('conv_loss',model_loss)\n\nif batch_norm: \n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n #train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)\n\t\t#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)\n\t#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)\n\t#train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n\ncorrect_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n\nwith tf.Session() as sess:\n\tmerged = tf.summary.merge_all()\n\tif batch_norm: \n\t\tlogdir = \"mnist/conv/SGD_batchnorm\"\n\telse:\n\t\tlogdir = \"mnist/conv/SGD_no_batchnorm\"\n\twriter = tf.summary.FileWriter(logdir, sess.graph)\n\n\tsess.run(tf.global_variables_initializer())\n\tfor batch_i in range(num_batches):\n\t\tbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n\t\t_,summary = sess.run([train_opt,merged], {inputs: batch_xs, labels: batch_ys, is_training: True})\n\t\t\n\t\twriter.add_summary(summary, batch_i)\n\n\t\tif batch_i % 500 == 0:\n\t\t\tloss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False})\n\t\t\tprint('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\t\telif batch_i % 100 == 0:\n\t\t\tloss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})\n\t\t\tprint('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n\tacc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels,is_training: False})\n\tprint('Final validation accuracy: {:>3.5f}'.format(acc))\n\tacc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels,is_training: False})\n\tprint('Final test accuracy: {:>3.5f}'.format(acc))", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
#!/usr/bin/env python
# coding:utf-8

"""
200. Number of Islands
Difficulty
Medium

Given a 2D grid made up of '1's (land) and '0's (water), count the number of islands.
An island is surrounded by water and is formed by connecting adjacent land cells
horizontally or vertically. You may assume all four edges of the grid are surrounded
by water.

Example 1:

Input:
11110
11010
11000
00000

Output: 1

Example 2:

Input:
11000
11000
00100
00011

Output: 3
"""
# ================================================================================
"""
Flood Fill Algorithm
"""
"""
Flood Fill

A classic algorithm for extracting the connected points of one region and
separating them from the neighbouring regions (or colouring them differently).
It is named after the way a flood spreads from one spot to every area it can reach.

Spreading out from one point to find all points connected to it is nothing exotic:
it is simply a depth-first search (DFS) or breadth-first search (BFS) started from
that point, and the traversal discovers one connected region.

For this problem, that means starting a DFS or BFS from a cell that is land,
marking every cell connected to it, and counting the whole group as one island.

Notes:
The conditions for launching a DFS or BFS from a cell are:
1. the cell is land ('1'); if it is water ('0') there is no island to speak of;
2. the cell was not already marked by the DFS/BFS performed while discovering
   a previous island.
"""
# ================================================================================
"""
Approach:
    DFS (depth-first search)
    (backtracking)
Time complexity:
    O()
Space complexity:
    O()
"""


class Solution(object):
    # Direction array: row/column offsets of the 4 neighbours of the current cell
    """
    x-1,y
x,y-1   x,y    x,y+1
    x+1,y
    """
    # The order of the 4 directions does not matter.
    # Order used here: up, right, down, left
    directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]

    def dfs(self, matrix, i, j, m, n, visited):
        """
        Depth-first search
        """
        visited[i][j] = True
        # print '(%s,%s)' % (i, j)
        for direction in self.directions:
            new_i = i + direction[0]
            new_j = j + direction[1]
            """
            Conditions for running DFS on the next cell:
            1. the row index is inside the grid
            2. the column index is inside the grid
            3. the cell has not been visited yet
            4. the cell is land
            """
            if 0 <= new_i <= m - 1 \
                    and 0 <= new_j <= n - 1 \
                    and not visited[new_i][new_j] \
                    and matrix[new_i][new_j] == '1':
                self.dfs(matrix, new_i, new_j, m, n, visited)
            pass
        pass

    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int
        """
        if len(grid) == 0:
            return 0
        m = len(grid)
        n = len(grid[0])
        # island counter
        island_count = 0
        # matrix recording which cells have been visited
        matrix_visited = [[False for _ in range(n)] for _ in range(m)]
        """
        Starting from (0,0), try a DFS from every cell.
        """
        for i in range(m):
            for j in range(n):
                # Any unvisited land cell can start a DFS that discovers and
                # marks all land connected to it.
                if grid[i][j] == '1' and not matrix_visited[i][j]:
                    self.dfs(grid, i, j, m, n, matrix_visited)
                    # island count +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    island_count += 1
                    # print 'island_count:', island_count
                    pass
                pass
            pass
        return island_count

# ================================================================================
"""
Approach:
    BFS (breadth-first search)
    (uses an auxiliary queue)
Time complexity:
    O()
Space complexity:
    O()
"""


class Solution(object):
    # Direction array: row/column offsets of the 4 neighbours of the current cell
    """
    x-1,y
x,y-1   x,y    x,y+1
    x+1,y
    """
    # The order of the 4 directions does not matter.
    # Order used here: up, right, down, left
    directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]

    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int
        """
        if len(grid) == 0:
            return 0
        m = len(grid)
        n = len(grid[0])
        # island counter
        island_count = 0
        # matrix recording which cells have been visited
        matrix_visited = [[False for _ in range(n)] for _ in range(m)]
        # auxiliary queue
        from collections import deque
        queue = deque()
        """
        Starting from (0,0), try a BFS from every cell.
        """
        for i in range(m):
            for j in range(n):
                # Any unvisited land cell can start a BFS that discovers and
                # marks all land connected to it.
                if grid[i][j] == '1' and not matrix_visited[i][j]:
                    # ------------------------------
                    # island count +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    island_count += 1
                    # print 'island_count: ', island_count

                    matrix_visited[i][j] = True
                    # print '(%s,%s)' % (i, j)
                    queue.append((i, j))
                    # ------------------------------
                    while queue:
                        x, y = queue.popleft()
                        # check the neighbours in all 4 directions
                        for direction in self.directions:
                            new_i = x + direction[0]
                            new_j = y + direction[1]
                            """
                            Conditions for marking the cell as visited and
                            putting it on the queue:
                            1. the row index is inside the grid
                            2. the column index is inside the grid
                            3. the cell has not been visited yet
                            4. the cell is land
                            """
                            if 0 <= new_i <= m - 1 \
                                    and 0 <= new_j <= n - 1 \
                                    and not matrix_visited[new_i][new_j] \
                                    and grid[new_i][new_j] == '1':
                                # mark as visited
                                matrix_visited[new_i][new_j] = True
                                # print '(%s,%s)' % (new_i, new_j)
                                # add to the queue
                                queue.append((new_i, new_j))
                            pass
                        pass
                    # ------------------------------
                    pass
                pass
            pass
        return island_count


# ================================================================================
# ================================================================================
# ================================================================================
# ================================================================================


gggg = [['1', '1', '1', '1', '0'],
        ['1', '1', '0', '1', '0'],
        ['1', '1', '0', '0', '0'],
        ['0', '0', '0', '0', '0']]
solution = Solution()
result = solution.numIslands(gggg)
print(result)
normal
{ "blob_id": "b46f19708e9e2a1be2bbd001ca6341ee7468a60d", "index": 7147, "step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n <mask token>\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[\n new_i][new_j] and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n island_count += 1\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n 
x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[\n new_i][new_j] and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n island_count += 1\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\nprint(result)\n", "step-5": "#!/usr/bin/env python\n# coding:utf-8\n\n\"\"\"\n200. 
岛屿数量\n难度\n中等\n\n给定一个由 '1'(陆地)和 '0'(水)组成的的二维网格,计算岛屿的数量。一个岛被水包围,并且它是通过水平方向或垂直方向上相邻的陆地连接而成的。你可以假设网格的四个边均被水包围。\n\n示例 1:\n\n输入:\n11110\n11010\n11000\n00000\n\n输出: 1\n示例 2:\n\n输入:\n11000\n11000\n00100\n00011\n\n输出: 3\n\"\"\"\n# ================================================================================\n\"\"\"\n洪水填充算法(Flood Fill Algorithm)\n\"\"\"\n\"\"\"\nFlood Fill 算法\n\n是从一个区域中提取若干个连通的点与其他相邻区域区分开(或分别染成不同颜色)的经典算法。\n因为其思路类似洪水从一个区域扩散到所有能到达的区域而得名。\n\n从一个点扩散开,找到与其连通的点,这不是什么高深的算法,\n其实就是从一个点开始,进行一次“深度优先遍历”或者“广度优先遍历”,\n通过“深度优先遍历”或者“广度优先遍历”发现一片连着的区域,\n\n对于这道题来说,\n就是从一个是“陆地”的格子开始进行一次“深度优先遍历”或者“广度优先遍历”,\n把与之相连的所有的格子都标记上,视为发现了一个“岛屿”。\n\n说明:\n那么每一次进行“深度优先遍历”或者“广度优先遍历”的条件就是:\n1、这个格子是陆地(“1”),如果是水域(“0”)就无从谈论“岛屿”;\n2、这个格子不能是之前发现“岛屿”的过程中执行了“深度优先遍历”或者“广度优先遍历”操作,而被标记的格子。\n\"\"\"\n# ================================================================================\n\"\"\"\n思路:\n DFS(深度优先遍历)\n (回溯)\n时间复杂度:\n O()\n空间复杂度: \n O()\n\"\"\"\n\n\nclass Solution(object):\n # 方向数组,它表示了相对于当前位置的 4 个方向的横、纵坐标的偏移量\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n # 这4个方向的顺序无关紧要\n # 此处的方向顺序:上、右、下、左\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n # print '(%s,%s)' % (i, j)\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0<=new_i<=m-1 \\\n and 0<=new_j<=n-1 \\\n and not visited[new_i][new_j] \\\n and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n # 孤岛计数\n island_count = 0\n # 已访问过的记录矩阵\n matrix_visited = [[False for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n # 只要是陆地,且没有被访问过的,就可以使用 DFS 发现与之相连的陆地,并进行标记\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n # 岛屿计数 +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n island_count += 1\n # print 'island_count:', island_count\n pass\n pass\n pass\n return island_count\n\n# ================================================================================\n\"\"\"\n思路:\n BFS(广度优先遍历)\n (需要一个辅助队列)\n时间复杂度:\n O()\n空间复杂度: \n O()\n\"\"\"\n\n\nclass Solution(object):\n # 方向数组,它表示了相对于当前位置的 4 个方向的横、纵坐标的偏移量\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n # 这4个方向的顺序无关紧要\n # 此处的方向顺序:上、右、下、左\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n # 孤岛计数\n island_count = 0\n # 已访问过的记录矩阵\n matrix_visited = [[False for _ in range(n)] for _ in range(m)]\n # 辅助队列\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n # 只要是陆地,且没有被访问过的,就可以使用 BFS 发现与之相连的陆地,并进行标记\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n # ------------------------------\n # 岛屿计数 +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n island_count += 1\n # print 'island_count: ', island_count\n matrix_visited[i][j] = True\n # print '(%s,%s)' % (i, j)\n queue.append((i, j))\n # ------------------------------\n while queue:\n x, y = queue.popleft()\n # 依次检查 4 个方向的邻居\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n 
\"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 \\\n and 0 <= new_j <= n - 1 \\\n and not matrix_visited[new_i][new_j] \\\n and grid[new_i][new_j] == '1':\n # 标记已访问\n matrix_visited[new_i][new_j] = True\n # print '(%s,%s)' % (new_i, new_j)\n # 加入队列\n queue.append((new_i, new_j))\n pass\n pass\n # ------------------------------\n pass\n pass\n pass\n return island_count\n\n\n# ================================================================================\n# ================================================================================\n# ================================================================================\n# ================================================================================\n\n\ngggg = [['1', '1', '1', '1', '0'],\n ['1', '1', '0', '1', '0'],\n ['1', '1', '0', '0', '0'],\n ['0', '0', '0', '0', '0']]\nsolution = Solution()\nresult = solution.numIslands(gggg)\nprint(result)\n", "step-ids": [ 3, 5, 7, 10, 12 ] }
[ 3, 5, 7, 10, 12 ]
import os import platform import _winreg def gid(x): find=x winreg = _winreg REG_PATH1 = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall" REG_PATH2 = r"SOFTWARE\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall" registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH1, 0, winreg.KEY_READ) winreg.CloseKey(registry_key) name = [] string=[] registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH1, 0, winreg.KEY_READ) i=0 while True: try: sub_registry_key = winreg.EnumKey(registry_key, i) newpath1 = REG_PATH1 + '\\' + sub_registry_key new_registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, newpath1, 0, winreg.KEY_READ) try: DisplayName, getname = winreg.QueryValueEx(new_registry_key, 'DisplayName') UninstallString, getname = winreg.QueryValueEx(new_registry_key, 'UninstallString') winreg.CloseKey(new_registry_key) name.append(DisplayName) string.append( UninstallString ) except: pass i += 1 except: break registry_key1 = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH2, 0, winreg.KEY_READ) ii=0 while True: try: sub_registry_key1 = winreg.EnumKey(registry_key1, ii) newpath2 = REG_PATH2 + '\\' + sub_registry_key1 new_registry_key1 = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, newpath2, 0, winreg.KEY_READ) try: DisplayName1, getname = winreg.QueryValueEx(new_registry_key1, 'DisplayName') DisplayVersion1, getname = winreg.QueryValueEx(new_registry_key1, 'DisplayVersion') UninstallString1, getname = winreg.QueryValueEx(new_registry_key1, 'UninstallString') winreg.CloseKey(new_registry_key1) name.append(DisplayName1) string.append(UninstallString1 ) except: pass ii += 1 except: break try: registry_key2 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, REG_PATH1, 0, winreg.KEY_READ) iii=0 while True: try: sub_registry_key2 = winreg.EnumKey(registry_key2, iii) newpath3 = REG_PATH1 + '\\' + sub_registry_key2 new_registry_key2 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, newpath3, 0, winreg.KEY_READ) try: DisplayName2, getname = winreg.QueryValueEx(new_registry_key2, 'DisplayName') UninstallString2, getname = winreg.QueryValueEx(new_registry_key2, 'UninstallString') winreg.CloseKey(new_registry_key2) name.append( DisplayName2) string.append(UninstallString2 ) except: pass iii += 1 except: break except: pass try: registry_key3 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, REG_PATH2, 0, winreg.KEY_READ) iiii=0 while True: try: sub_registry_key3 = winreg.EnumKey(registry_key3, iiii) newpath4 = REG_PATH2 + '\\' + sub_registry_key3 new_registry_key3 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, newpath4, 0, winreg.KEY_READ) try: DisplayName3, getname = winreg.QueryValueEx(new_registry_key3, 'DisplayName') UninstallString3, getname = winreg.QueryValueEx(new_registry_key3, 'UninstallString') winreg.CloseKey(new_registry_key3) name.append( DisplayName3 ) string.append(UninstallString3 ) except: pass iiii += 1 except: break except: pass out={} for i in name: if find.lower() in i.lower(): x=i for k,v in zip(name,string): out[k] = v x1=out[x] if x1: cmd=x1+' /quiet REBOOT=ReallySuppress REMOVE=ALL' os.popen(cmd).read() def uni(): arch=platform.machine() if 'AMD64' in arch: if os.path.exists(os.environ['PROGRAMFILES(X86)']): if os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes' Anti-Malware")): os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes' Anti-Malware")) print "\n\t*)Malwarebytes Anti-Malware Uninstallation started......" 
out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) if os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes Anti-Malware")): os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes Anti-Malware")) print "\n\t*)Malwarebytes Anti-Malware Uninstallation started......" out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) else: if os.path.exists(os.environ['PROGRAMFILES']): if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes' Anti-Malware")): print "\n\t*)Malwarebytes Anti-Malware Uninstallation started......" os.chdir(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes' Anti-Malware")) out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes Anti-Malware")): os.chdir(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes Anti-Malware")) print "\n\t*)Malwarebytes Anti-Malware Uninstallation started......" out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) def uni2(): arch=platform.machine() if 'AMD64' in arch: if os.path.exists(os.environ['PROGRAMFILES(X86)']): if os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes Anti-Exploit")): print "\n\t*)Malwarebytes Anti-Exploit Uninstallation started......" os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],"Malwarebytes Anti-Exploit")) out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) else: if os.path.exists(os.environ['PROGRAMFILES']): if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes Anti-Exploit")): os.chdir(os.path.join(os.environ['PROGRAMFILES'],"Malwarebytes Anti-Exploit")) print "\n\t*)Malwarebytes Anti-Exploit Uninstallation started......" out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) def uni3(): if os.path.exists(os.environ['PROGRAMFILES']): if os.path.exists('C:\Program Files\Malwarebytes\Anti-Malware'): os.chdir('C:\Program Files\Malwarebytes\Anti-Malware') print "\n\t*)Malwarebytes Uninstallation started......" out=os.popen("unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART").read() print(out) else: print "\n\t*)Malwarebytes path not found..." 
def uni4(): import os import _winreg import re def check(): inst=os.popen("wmic product get name,identifyingnumber").read() return inst def reg(): blacklist=r"Malwarebytes' Managed Client" def collectprograms(rtkey,pK,kA): try: list=[] oK=_winreg.OpenKey(rtkey,pK,0,kA) i=0 while True: try: bkey=_winreg.EnumKey(oK,i) vkey=os.path.join(pK,bkey) oK1=_winreg.OpenKey(rtkey,vkey,0,kA) try: DN,bla=_winreg.QueryValueEx(oK1,'DisplayName') inlist=[DN.strip(), vkey, pK] list.append(inlist) except: pass i+=1 except: break except: pass return list uninstallkey_32='SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall' if 'PROGRAMFILES(X86)' in os.environ.keys(): rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ), (_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ), (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ), (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ)] else: rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_READ), (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_READ)] bet=[] for i in rklist: col=collectprograms(i[0], i[1], i[2]) for c in col: print c if blacklist in c: bet.append(c[1]) if not bet: print "Please Mention the valid blacklist Installed Software" else: for i in bet: print i j=i.replace(" ", '" "') v='\\' path="HKEY_LOCAL_MACHINE"+v+i path1="HKEY_LOCAL_MACHINE"+v+j got=path1 return got inst=check() if len(inst)>0: find=re.findall("{.*}\s\sMalwarebytes'\sManaged\\sClient",inst) if len(find)>0: final=re.findall('{.*}',find[0])[0] if len(final) == 38: print "\n\t*)Malwarebytes' Managed Client Uninstallation started......" cmd='msiexec.exe /x %s /quiet REBOOT=ReallySuppress REMOVE=ALL'%final os.popen(cmd).read() else: fin=reg() fina=fin.split('\\')[-1] final1=re.findall('{.*}',fina)[0] print "\n\t*)Malwarebytes' Managed Client Uninstallation started......" 
cmd='msiexec.exe /x %s /quiet REBOOT=ReallySuppress REMOVE=ALL'%final1 os.popen(cmd).read() def checkapp(AppName): import _winreg import os AppName = AppName.lower() def DNDS(rtkey, pK, kA): ln = [] lv = [] try: oK = _winreg.OpenKey(rtkey, pK, 0, kA) i = 0 while True: try: bkey = _winreg.EnumKey(oK, i) vkey = os.path.join(pK, bkey) oK1 = _winreg.OpenKey(rtkey, vkey, 0, kA) try: tls = [] DN, bla = _winreg.QueryValueEx(oK1, 'DisplayName') DV, bla = _winreg.QueryValueEx(oK1, 'DisplayVersion') _winreg.CloseKey(oK1) ln.append(DN) lv.append(DV) except: pass i += 1 except: break _winreg.CloseKey(oK) return zip(ln, lv) except: return zip(ln, lv) rK = _winreg.HKEY_LOCAL_MACHINE sK = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment' openedKey = _winreg.OpenKey(rK, sK, 0, _winreg.KEY_READ) arch, bla = _winreg.QueryValueEx(openedKey, 'PROCESSOR_ARCHITECTURE') arch = str(arch) _winreg.CloseKey(openedKey) if arch == 'AMD64': fList = DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_WOW64_32KEY | _winreg.KEY_READ) fList.extend(DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_WOW64_64KEY | _winreg.KEY_READ)) fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_WOW64_32KEY | _winreg.KEY_READ)) fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_WOW64_64KEY | _winreg.KEY_READ)) else: fList = DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_READ) fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall', _winreg.KEY_READ)) fList = set(fList) lr = [] rs = 0 for i in fList: a, b = i if AppName in a.lower(): lr.append('success: {} is installed'.format(a)) lr.append('{:<25}{:5}'.format(a, b)) rs += 1 else: rs += 0 if rs: return True return False def recheck(): app3=checkapp('Malwarebytes Anti-Malware') app8=checkapp('Malwarebytes Anti-Malware') if app3: print '\n\t\t*)Try again with Uninstall String' gid('Malwarebytes Anti-Malware') if app8: print "\n\t\t*)Malwarebytes Anti-Malware Uninstalled Failed...." return '0' else: print "\n\t\t*)Malwarebytes Anti-Malware Uninstalled Successfully...." return '1' else: print "\n\t*)Malwarebytes Anti-Malware Uninstalled Successfully...." return '1' def recheck1(): app4=checkapp('Malwarebytes Anti-Exploit') app9=checkapp('Malwarebytes Anti-Malware') if app4: print '\n\t\t*)Try again with Uninstall String' gid('Malwarebytes Anti-Exploit') if app9: print "\n\t\t*)Malwarebytes Anti-Exploit Uninstalled Failed...." return '0' else: print "\n\t\t*)Malwarebytes Anti-Exploit Uninstalled Successfully...." return '1' else: print "\n\t*)Malwarebytes Anti-Exploit Uninstalled Successfully...." return '1' def recheck2(): app6=checkapp('Malwarebytes version') app10=checkapp('Malwarebytes Anti-Malware') if app6: print '\n\t\t*)Try again with Uninstall String' gid('Malwarebytes version') if app10: print "\n\t\t*)Malwarebytes Uninstalled Failed...." return '0' else: print "\n\t\t*)Malwarebytes Uninstalled Successfully...." return '1' else: print "\n\t*)Malwarebytes Uninstalled Successfully...." return '1' def recheck3(): app7=checkapp("Malwarebytes' Managed Client") app11=checkapp('Malwarebytes Anti-Malware') if app7: print "\n\t*)Malwarebytes' Managed Client Uninstalled Failed...." else: print "\n\t*)Malwarebytes' Managed Client Uninstalled Successfully...." 
return '1' app1=checkapp('Malwarebytes Anti-Malware') app2=checkapp('Malwarebytes Anti-Exploit') app5=checkapp('Malwarebytes version') app7=checkapp("Malwarebytes' Managed Client") if app1: print "Malwarebytes Anti-Malware is Found in the system" uni() r=recheck() else: print "\nMalwarebytes Anti-Malware is not found in the system" r=1 if app2: print "\nMalwarebytes Anti-Exploit is Found in the System" uni2() r1=recheck1() else: print "\nMalwarebytes Anti-Exploit is not found in the system" r1=1 if app5: print "\nMalwarebytes is Found in the system" uni3() r2=recheck2() else: print "\nMalwarebytes is not found in the system" r2=1 if app7: print "\nMalwarebytes' Managed Client is Found in the system" uni4() r3=recheck3() else: print "\nMalwarebytes' Managed Client is not found in the system" r3=1
normal
{ "blob_id": "a444e215b64b3a2d7f736e38227b68c1a1b952a0", "index": 7133, "step-1": "import os\nimport platform\nimport _winreg\n\n\ndef gid(x):\n find=x\n winreg = _winreg\n REG_PATH1 = r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\n REG_PATH2 = r\"SOFTWARE\\WOW6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\n registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH1, 0, winreg.KEY_READ)\n winreg.CloseKey(registry_key)\n name = []\n string=[]\n\n registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH1, 0, winreg.KEY_READ)\n i=0\n\n while True:\n try:\n sub_registry_key = winreg.EnumKey(registry_key, i)\n newpath1 = REG_PATH1 + '\\\\' + sub_registry_key\n new_registry_key = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, newpath1, 0, winreg.KEY_READ)\n try:\n DisplayName, getname = winreg.QueryValueEx(new_registry_key, 'DisplayName')\n UninstallString, getname = winreg.QueryValueEx(new_registry_key, 'UninstallString')\n winreg.CloseKey(new_registry_key)\n name.append(DisplayName)\n string.append( UninstallString )\n except:\n pass\n i += 1\n except:\n break\n\n registry_key1 = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, REG_PATH2, 0, winreg.KEY_READ)\n ii=0\n while True:\n try:\n sub_registry_key1 = winreg.EnumKey(registry_key1, ii)\n newpath2 = REG_PATH2 + '\\\\' + sub_registry_key1\n new_registry_key1 = winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, newpath2, 0, winreg.KEY_READ)\n try:\n DisplayName1, getname = winreg.QueryValueEx(new_registry_key1, 'DisplayName')\n DisplayVersion1, getname = winreg.QueryValueEx(new_registry_key1, 'DisplayVersion')\n UninstallString1, getname = winreg.QueryValueEx(new_registry_key1, 'UninstallString')\n winreg.CloseKey(new_registry_key1)\n name.append(DisplayName1)\n string.append(UninstallString1 )\n except:\n pass\n ii += 1\n except:\n break\n try:\n registry_key2 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, REG_PATH1, 0, winreg.KEY_READ)\n iii=0\n while True:\n try:\n sub_registry_key2 = winreg.EnumKey(registry_key2, iii)\n newpath3 = REG_PATH1 + '\\\\' + sub_registry_key2\n new_registry_key2 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, newpath3, 0, winreg.KEY_READ)\n try:\n DisplayName2, getname = winreg.QueryValueEx(new_registry_key2, 'DisplayName')\n UninstallString2, getname = winreg.QueryValueEx(new_registry_key2, 'UninstallString')\n winreg.CloseKey(new_registry_key2)\n name.append( DisplayName2)\n string.append(UninstallString2 )\n except:\n pass\n iii += 1\n except:\n break\n except:\n pass\n try:\n registry_key3 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, REG_PATH2, 0, winreg.KEY_READ)\n iiii=0\n while True:\n try:\n sub_registry_key3 = winreg.EnumKey(registry_key3, iiii)\n newpath4 = REG_PATH2 + '\\\\' + sub_registry_key3\n new_registry_key3 = winreg.OpenKey( winreg.HKEY_CURRENT_USER, newpath4, 0, winreg.KEY_READ)\n try:\n DisplayName3, getname = winreg.QueryValueEx(new_registry_key3, 'DisplayName')\n UninstallString3, getname = winreg.QueryValueEx(new_registry_key3, 'UninstallString')\n winreg.CloseKey(new_registry_key3)\n name.append( DisplayName3 )\n string.append(UninstallString3 )\n except:\n pass\n iiii += 1\n except:\n break\n except:\n pass\n out={}\n for i in name:\n if find.lower() in i.lower():\n x=i\n for k,v in zip(name,string):\n out[k] = v\n x1=out[x]\n \n if x1:\n cmd=x1+' /quiet REBOOT=ReallySuppress REMOVE=ALL'\n os.popen(cmd).read()\n\ndef uni():\n arch=platform.machine()\n if 'AMD64' in arch:\n if os.path.exists(os.environ['PROGRAMFILES(X86)']):\n if 
os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes' Anti-Malware\")):\n os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes' Anti-Malware\"))\n print \"\\n\\t*)Malwarebytes Anti-Malware Uninstallation started......\" \n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n if os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes Anti-Malware\")):\n os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes Anti-Malware\"))\n print \"\\n\\t*)Malwarebytes Anti-Malware Uninstallation started......\" \n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n \n else:\n if os.path.exists(os.environ['PROGRAMFILES']):\n if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes' Anti-Malware\")):\n print \"\\n\\t*)Malwarebytes Anti-Malware Uninstallation started......\" \n os.chdir(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes' Anti-Malware\"))\n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n \n if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes Anti-Malware\")):\n os.chdir(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes Anti-Malware\"))\n print \"\\n\\t*)Malwarebytes Anti-Malware Uninstallation started......\" \n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n\n \n \ndef uni2():\n arch=platform.machine()\n if 'AMD64' in arch:\n if os.path.exists(os.environ['PROGRAMFILES(X86)']):\n if os.path.exists(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes Anti-Exploit\")):\n print \"\\n\\t*)Malwarebytes Anti-Exploit Uninstallation started......\" \n os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'],\"Malwarebytes Anti-Exploit\"))\n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n \n \n else:\n if os.path.exists(os.environ['PROGRAMFILES']):\n if os.path.exists(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes Anti-Exploit\")):\n os.chdir(os.path.join(os.environ['PROGRAMFILES'],\"Malwarebytes Anti-Exploit\"))\n print \"\\n\\t*)Malwarebytes Anti-Exploit Uninstallation started......\"\n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n\ndef uni3():\n if os.path.exists(os.environ['PROGRAMFILES']):\n if os.path.exists('C:\\Program Files\\Malwarebytes\\Anti-Malware'):\n os.chdir('C:\\Program Files\\Malwarebytes\\Anti-Malware')\n print \"\\n\\t*)Malwarebytes Uninstallation started......\" \n out=os.popen(\"unins000.exe /VERYSILENT /SUPPRESSMSGBOXES /NORESTART\").read()\n print(out)\n else:\n print \"\\n\\t*)Malwarebytes path not found...\"\n\ndef uni4():\n import os\n import _winreg\n import re\n def check():\n inst=os.popen(\"wmic product get name,identifyingnumber\").read() \n return inst\n def reg():\n blacklist=r\"Malwarebytes' Managed Client\"\n def collectprograms(rtkey,pK,kA):\n try:\n list=[]\n oK=_winreg.OpenKey(rtkey,pK,0,kA)\n i=0\n while True:\n try:\n bkey=_winreg.EnumKey(oK,i)\n vkey=os.path.join(pK,bkey)\n oK1=_winreg.OpenKey(rtkey,vkey,0,kA)\n try:\n DN,bla=_winreg.QueryValueEx(oK1,'DisplayName')\n inlist=[DN.strip(), vkey, pK]\n list.append(inlist)\n \n except:\n pass\n i+=1\n except:\n break\n except:\n pass\n return list \n uninstallkey_32='SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall'\n if 'PROGRAMFILES(X86)' in os.environ.keys():\n \n 
rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ),\n (_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ),\n (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ),\n (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ)]\n else:\n \n rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey_32,_winreg.KEY_READ),\n (_winreg.HKEY_CURRENT_USER,uninstallkey_32,_winreg.KEY_READ)]\n\n bet=[]\n for i in rklist:\n col=collectprograms(i[0], i[1], i[2])\n for c in col:\n print c\n if blacklist in c:\n bet.append(c[1])\n if not bet:\n print \"Please Mention the valid blacklist Installed Software\"\n else:\n for i in bet:\n print i\n j=i.replace(\" \", '\" \"')\n v='\\\\'\n path=\"HKEY_LOCAL_MACHINE\"+v+i\n path1=\"HKEY_LOCAL_MACHINE\"+v+j\n got=path1\n return got\n inst=check()\n if len(inst)>0:\n find=re.findall(\"{.*}\\s\\sMalwarebytes'\\sManaged\\\\sClient\",inst)\n if len(find)>0:\n final=re.findall('{.*}',find[0])[0] \n if len(final) == 38:\n print \"\\n\\t*)Malwarebytes' Managed Client Uninstallation started......\" \n cmd='msiexec.exe /x %s /quiet REBOOT=ReallySuppress REMOVE=ALL'%final\n os.popen(cmd).read()\n else:\n fin=reg()\n fina=fin.split('\\\\')[-1]\n final1=re.findall('{.*}',fina)[0]\n print \"\\n\\t*)Malwarebytes' Managed Client Uninstallation started......\" \n cmd='msiexec.exe /x %s /quiet REBOOT=ReallySuppress REMOVE=ALL'%final1\n os.popen(cmd).read()\n \n \ndef checkapp(AppName):\n import _winreg\n import os\n AppName = AppName.lower()\n def DNDS(rtkey, pK, kA):\n ln = []\n lv = []\n try:\n oK = _winreg.OpenKey(rtkey, pK, 0, kA)\n i = 0\n while True:\n try:\n bkey = _winreg.EnumKey(oK, i)\n vkey = os.path.join(pK, bkey)\n oK1 = _winreg.OpenKey(rtkey, vkey, 0, kA)\n try:\n tls = []\n DN, bla = _winreg.QueryValueEx(oK1, 'DisplayName')\n DV, bla = _winreg.QueryValueEx(oK1, 'DisplayVersion')\n _winreg.CloseKey(oK1)\n ln.append(DN)\n lv.append(DV)\n except:\n pass\n i += 1\n except:\n break\n _winreg.CloseKey(oK)\n return zip(ln, lv)\n except:\n return zip(ln, lv)\n\n rK = _winreg.HKEY_LOCAL_MACHINE\n sK = r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment'\n openedKey = _winreg.OpenKey(rK, sK, 0, _winreg.KEY_READ)\n arch, bla = _winreg.QueryValueEx(openedKey, 'PROCESSOR_ARCHITECTURE')\n arch = str(arch)\n _winreg.CloseKey(openedKey)\n\n if arch == 'AMD64':\n fList = DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_WOW64_32KEY | _winreg.KEY_READ)\n fList.extend(DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_WOW64_64KEY | _winreg.KEY_READ))\n fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_WOW64_32KEY | _winreg.KEY_READ))\n fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_WOW64_64KEY | _winreg.KEY_READ))\n else:\n fList = DNDS(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_READ)\n fList.extend(DNDS(_winreg.HKEY_CURRENT_USER, r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall', _winreg.KEY_READ))\n fList = set(fList)\n\n lr = []\n rs = 0\n for i in fList:\n a, b = i\n if AppName in a.lower():\n lr.append('success: {} is installed'.format(a))\n lr.append('{:<25}{:5}'.format(a, b))\n rs += 1\n else:\n rs += 0\n if rs:\n return 
True\n return False\n\ndef recheck():\n app3=checkapp('Malwarebytes Anti-Malware')\n app8=checkapp('Malwarebytes Anti-Malware')\n if app3:\n print '\\n\\t\\t*)Try again with Uninstall String'\n gid('Malwarebytes Anti-Malware')\n if app8:\n print \"\\n\\t\\t*)Malwarebytes Anti-Malware Uninstalled Failed....\"\n return '0'\n else:\n print \"\\n\\t\\t*)Malwarebytes Anti-Malware Uninstalled Successfully....\"\n return '1'\n else:\n print \"\\n\\t*)Malwarebytes Anti-Malware Uninstalled Successfully....\"\n return '1'\ndef recheck1():\n app4=checkapp('Malwarebytes Anti-Exploit')\n app9=checkapp('Malwarebytes Anti-Malware')\n if app4:\n print '\\n\\t\\t*)Try again with Uninstall String'\n gid('Malwarebytes Anti-Exploit')\n if app9:\n print \"\\n\\t\\t*)Malwarebytes Anti-Exploit Uninstalled Failed....\"\n return '0'\n else:\n print \"\\n\\t\\t*)Malwarebytes Anti-Exploit Uninstalled Successfully....\"\n return '1'\n\n else:\n print \"\\n\\t*)Malwarebytes Anti-Exploit Uninstalled Successfully....\"\n return '1'\ndef recheck2():\n app6=checkapp('Malwarebytes version')\n app10=checkapp('Malwarebytes Anti-Malware')\n if app6:\n print '\\n\\t\\t*)Try again with Uninstall String'\n gid('Malwarebytes version')\n if app10:\n print \"\\n\\t\\t*)Malwarebytes Uninstalled Failed....\"\n return '0'\n else:\n print \"\\n\\t\\t*)Malwarebytes Uninstalled Successfully....\"\n return '1'\n \n else:\n print \"\\n\\t*)Malwarebytes Uninstalled Successfully....\"\n return '1'\n\ndef recheck3():\n app7=checkapp(\"Malwarebytes' Managed Client\")\n app11=checkapp('Malwarebytes Anti-Malware')\n if app7:\n print \"\\n\\t*)Malwarebytes' Managed Client Uninstalled Failed....\" \n else:\n print \"\\n\\t*)Malwarebytes' Managed Client Uninstalled Successfully....\"\n return '1'\n \n\napp1=checkapp('Malwarebytes Anti-Malware')\napp2=checkapp('Malwarebytes Anti-Exploit')\napp5=checkapp('Malwarebytes version')\napp7=checkapp(\"Malwarebytes' Managed Client\")\n\nif app1:\n print \"Malwarebytes Anti-Malware is Found in the system\"\n uni()\n r=recheck()\nelse:\n print \"\\nMalwarebytes Anti-Malware is not found in the system\"\n r=1\nif app2:\n print \"\\nMalwarebytes Anti-Exploit is Found in the System\"\n uni2()\n r1=recheck1()\nelse:\n print \"\\nMalwarebytes Anti-Exploit is not found in the system\"\n r1=1\nif app5:\n print \"\\nMalwarebytes is Found in the system\"\n uni3()\n r2=recheck2()\nelse:\n print \"\\nMalwarebytes is not found in the system\"\n r2=1\n\nif app7:\n print \"\\nMalwarebytes' Managed Client is Found in the system\"\n uni4()\n r3=recheck3()\nelse:\n print \"\\nMalwarebytes' Managed Client is not found in the system\"\n r3=1\n\n \n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from mpi4py import MPI import matplotlib from tmm import coh_tmm import pandas as pd import os from numpy import pi from scipy.interpolate import interp1d from joblib import Parallel, delayed import numpy as np import glob import matplotlib.pyplot as plt import pickle as pkl import seaborn as sns from scipy.optimize import minimize import json from tqdm import tqdm DATABASE = './data' INSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe'] METALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni'] num_workers = 8 def cal_reward(R, T, A, target): ''' Calculate reward based on given spectrums. We calculate the reward using averaged (1-mse). Args: R, T, A: numpy array. Reflection, transmission, and absorption spectrums, respectively. target: dict. {'R':np.array, 'T':np.array, 'A':np.array} Returns: reward: float. Reward for the spectrum. ''' reward = 0 for k, v in target.items(): if k == 'R': res = R elif k == 'T': res = T else: res = A reward += 1 - np.abs(res.squeeze() - v).mean() reward /= len(target) return reward class Memory: def __init__(self): self.actions = [] self.states = [] self.logprobs = [] self.rewards = [] self.is_terminals = [] def clear_memory(self): del self.actions[:] del self.states[:] del self.logprobs[:] del self.rewards[:] del self.is_terminals[:] def batch_spectrum(env, names_list, thickness_list): def spectrum(args): ''' Inputs: 1. names: list of lists, each list correspond to the structures 2. thickness: list of lists ''' names, thickness = args R, T, A = env.spectrum(names, thickness, 0, False) return R, T, A res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args) for args in zip(names_list, thickness_list)) res = np.array(res) Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :] return Rs, Ts, As def merge_layers(categories, thicknesses): ''' Merges consecutive layers with the same material types. ''' thicknesses = thicknesses[1:-1] c_output = [categories[0]] t_output = [thicknesses[0]] for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])): if c == c_output[-1]: t_output[-1] += d continue else: c_output.append(c) t_output.append(d) t_output.insert(0, np.inf) t_output.insert(len(t_output), np.inf) return c_output, t_output def get_structure(categories, values, materials, ds, continuous=False, max_value=400): ''' Given categories and values, return the strucure in the form (name (str), thickness (nm)) ''' def threshold(value): ''' ''' names = [materials[item] for item in categories] if not continuous: thickness = [np.inf] + [ds[item] for item in values] + [np.inf] else: thickness = [] for category, value in zip(categories, values): name = materials[category] if name == 'Ag': thickness.append( min(max(15, int(value * max_value//2)), max_value)) elif name in METALS: thickness.append( min(max(5, int(value * max_value//2)), max_value)) elif name in INSULATORS: thickness.append( min(max(1, int(value * max_value//2)), max_value)) else: raise ValueError('Material not known') # thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i, # item in enumerate(values)] + [np.inf] thickness = [np.inf] + thickness + [np.inf] return names, thickness class DesignTracker(): def __init__(self, epochs, **kwargs): """ This class tracks the best designs discovered. 
""" if epochs == -1: self.layer_ls = [] self.thick_ls = [] self.max_ret_ls = [] self.layer_ls = [0] * epochs self.thick_ls = [0] * epochs self.max_ret_ls = [0] * epochs self.kwargs = kwargs self.current_e = 0 def store(self, layers, thicknesses, ret, e, append_mode=False): if append_mode: self.layer_ls.append(layers) self.thick_ls.append(thicknesses) self.max_ret_ls.append(ret) else: if ret >= self.max_ret_ls[e]: self.layer_ls[e] = layers self.thick_ls[e] = thicknesses self.max_ret_ls[e] = ret def save_state(self): # save buffer from all processes comm = MPI.COMM_WORLD rank = comm.Get_rank() filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank)) pkl.dump(self, open(filename, 'wb')) def print_progress(self): progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls)) read_progress = [] for i in range(len(progress)): if progress[i] == (0,0,0): break read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])]) return read_progress def print_progress(progress): for i in range(len(progress)): print(progress[i], 0) progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]] return progress class TMM_sim(): def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500): ''' This class returns the spectrum given the designed structures. ''' self.mats = mats # include substrate self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats self.wavelength = wavelength self.nk_dict = self.load_materials() self.substrate = substrate self.substrate_thick = substrate_thick def load_materials(self): ''' Load material nk and return corresponding interpolators. 
        Return:
            nk_dict: dict, key -- material name, value: n, k in the 
            self.wavelength range
        '''
        nk_dict = {}

        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))

            mat_nk_fn = interp1d(
                mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)

        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        '''
        Input:
            materials: list
            thickness: list
            theta: degree, the incidence angle

        Return:
            s: array, spectrum
        '''
        degree = pi/180
        if self.substrate != 'Air':
            thickness.insert(-1, self.substrate_thick) # substrate thickness

        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1e3):

            # we assume the last layer is glass
            if self.substrate == 'Glass':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]

            # n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]

            # mport pdb; pdb.set_trace()
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])

        R, T = np.array(R), np.array(T)
        A = 1 - R - T

        if plot:
            self.plot_spectrum(R, T, A)
            if title:
                thick = thickness[1:-1]
                title = ' | '.join(['{}nm {}'.format(d, m)
                                    for d, m in zip(thick, materials)])
                if self.substrate is not 'Air':
                    title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
                else:
                    title = 'Air | ' + title + ' | Air'
                plt.title(title, **{'size': '10'})

        return R, T, A

    def plot_spectrum(self, R, T, A):

        plt.plot(self.wavelength * 1000, R, self.wavelength *
                 1000, T, self.wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.
                    format(np.mean(R)*100),
                    'T: Average = {:.2f}%'.
                    format(np.mean(T)*100),
                    'A: Average = {:.2f}%'.
                    format(np.mean(A)*100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])


# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
    df = pd.read_csv(file, sep="\t")
    width = 0.5
    # x = 'Time'
    if ax is None:
        fig, ax = plt.subplots(2,1)
    sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
    # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
    sns.lineplot(x=x, y='AverageEpRet', data=df,
                 ax=ax[1], color=color, alpha=alpha)
    plt.fill_between(df[x],
                     df['AverageEpRet']-width/2*df['StdEpRet'],
                     df['AverageEpRet']+width/2*df['StdEpRet'],
                     alpha=0.3, color=color)

    return df

def combine_tracker(folder):
    '''
    Merge all buffers
    '''
    trackers = []

    if 'design_tracker_merged.pkl' in os.listdir(folder):
        tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
        combined_tracker = pkl.load(open(tracker_file, 'rb'))
        return combined_tracker

    for file in os.listdir(folder):
        if file.startswith('design_tracker_'):
            tracker_file = os.path.join(folder, file)
            trackers.append(pkl.load(open(tracker_file, 'rb')))

    combined_tracker = DesignTracker(len(trackers[0].layer_ls))
    max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)
    for e in range(len(trackers[0].layer_ls)):
        combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
        combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
        combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]

    if combined_tracker.layer_ls[-1] != 0:
        tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
        pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))

    return combined_tracker

def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):

    root = '../spinningup/data/'
    progress_ls = []
    max_ret_ls = []

    params = {'size':14}
    matplotlib.rc('font', **params)

    fig, ax = plt.subplots(2,1, figsize=(10,8))
    for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
        folder = os.path.join(root, exp, exp+'_s{}'.format(seed))
        progress_file = os.path.join(folder, 'progress.txt')
        df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)

        tracker = combine_tracker(folder)
        progress = tracker.print_progress()
        print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))
        progress_ls.append(progress)
        max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))

    ax[0].legend(max_ret_ls)
    ax[1].legend(exp_ls)
    plt.show()
    return progress_ls

def load_exp_res(folder):
    subfolders = [item for item in glob.glob(folder+'/*')]

    def read_hyper(file_name, rep=10):

        with open(os.path.join(file_name, 'config.json')) as f:
            hypers = json.load(f)
            hypers_dict = {}
            for k, v in hypers.items():
                if k.startswith('logger'):
                    continue
                elif isinstance(v, dict):
                    for kk, vv in v.items():
                        if isinstance(vv, list):
                            hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep
                        else:
                            hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep
                else:
                    hypers_dict[k] = [v] * rep

            hyper_df = pd.DataFrame(hypers_dict)
        return hyper_df

    first=True # first pandas file to load
    for subfolder in tqdm(subfolders):
        runs = glob.glob(subfolder+'/*')
        num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\t'))
        for run in runs:

            tracker = combine_tracker(run)
            progress = tracker.print_progress()
            best_design = progress[np.argmax(tracker.max_ret_ls)]

            if first:
                df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
                hyper_df = read_hyper(run, rep=len(df))
                best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))
                df = pd.concat([df, hyper_df, best_designs_df], axis=1)
                first = False

            else:
                df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
                hyper_df = read_hyper(run, rep=len(df_))
                best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))
                df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
                df = pd.concat([df, df_], axis=0)

    return df


def finetune(simulator, m0, x0, target, display=False, bounds=None):
    '''
    Finetune the structure using quasi-Newton's method.

    Args:
        m0: materials list given by the upstream RL
        x0: thicknesses given by the upstream RL
        display: if true, then plot the spectrum before and after the finetuning.

    Returns:
        x_opt: finetuned thickness list
    '''

    def objective_func(x):
        R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])
        return 1-cal_reward(R, T, A, target)

    if bounds is None:
        bounds = [(15, 200)] * len(x0)

    res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})
    x_opt = [int(item) for item in res.x]

    if display:
        plt.figure()
        simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)
        plt.figure()
        simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)

    return x_opt, res
normal
{ "blob_id": "f23bc0c277967d8e7a94a49c5a81ed5fb75d36cc", "index": 9327, "step-1": "<mask token>\n\n\nclass Memory:\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if 
self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n 
nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = 
visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + 
str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in 
range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, 
plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n", "step-4": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n \"\"\"\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. thickness: list of lists\n \"\"\"\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n return R, T, A\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args) for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit 
{:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep='\\t')\n width = 0.5\n if ax is None:\n fig, ax = plt.subplots(2, 1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha\n )\n sns.lineplot(x=x, y='AverageEpRet', data=df, ax=ax[1], color=color,\n alpha=alpha)\n plt.fill_between(df[x], df['AverageEpRet'] - width / 2 * df['StdEpRet'],\n df['AverageEpRet'] + width / 2 * df['StdEpRet'], alpha=0.3, color=color\n )\n 
return df\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n 
\n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n", "step-5": "from mpi4py import MPI\nimport matplotlib\nfrom tmm import coh_tmm\nimport pandas as pd\nimport os\nfrom numpy import pi\nfrom scipy.interpolate import interp1d\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport pickle as pkl\nimport seaborn as sns\nfrom scipy.optimize import minimize\nimport json\nfrom tqdm import tqdm\n\nDATABASE = './data'\nINSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']\nMETALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']\n\nnum_workers = 8\n\ndef cal_reward(R, T, A, target):\n '''\n Calculate reward based on given spectrums. \n We calculate the reward using averaged (1-mse).\n\n Args:\n R, T, A: numpy array. Reflection, transmission, and \n absorption spectrums, respectively.\n target: dict. {'R':np.array, 'T':np.array, 'A':np.array}\n\n Returns:\n reward: float. Reward for the spectrum. \n '''\n\n reward = 0\n for k, v in target.items():\n\n if k == 'R':\n res = R\n elif k == 'T':\n res = T\n else:\n res = A\n \n reward += 1 - np.abs(res.squeeze() - v).mean()\n\n reward /= len(target)\n\n return reward\n\n\nclass Memory:\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n '''\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. 
thickness: list of lists\n '''\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n\n return R, T, A\n\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)\n for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n '''\n Merges consecutive layers with the same material types.\n '''\n\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n '''\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n '''\n\n def threshold(value):\n '''\n\n '''\n\n names = [materials[item] for item in categories]\n\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(\n min(max(15, int(value * max_value//2)), max_value))\n elif name in METALS:\n thickness.append(\n min(max(5, int(value * max_value//2)), max_value))\n elif name in INSULATORS:\n thickness.append(\n min(max(1, int(value * max_value//2)), max_value))\n else:\n raise ValueError('Material not known')\n # thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,\n # item in enumerate(values)] + [np.inf]\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\nclass DesignTracker():\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n \n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n\n else:\n if ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n # save buffer from all processes\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n \n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0,0,0):\n break\n read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])\n\n return read_progress\n\ndef print_progress(progress):\n\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]\n\n return progress\n\nclass TMM_sim():\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):\n '''\n This class returns the spectrum given the designed 
structures.\n '''\n self.mats = mats\n # include substrate\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n '''\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n '''\n nk_dict = {}\n\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n\n\n mat_nk_fn = interp1d(\n mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n '''\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n '''\n degree = pi/180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick) # substrate thickness\n\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1e3):\n\n # we assume the last layer is glass\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]\n\n # n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]\n\n # mport pdb; pdb.set_trace()\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m)\n for d, m in zip(thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n\n plt.plot(self.wavelength * 1000, R, self.wavelength *\n 1000, T, self.wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.\n format(np.mean(R)*100),\n 'T: Average = {:.2f}%'.\n format(np.mean(T)*100),\n 'A: Average = {:.2f}%'.\n format(np.mean(A)*100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n# Plotting utils\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep=\"\\t\")\n width = 0.5\n # x = 'Time'\n if ax is None:\n fig, ax = plt.subplots(2,1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)\n # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])\n sns.lineplot(x=x, y='AverageEpRet', data=df,\n ax=ax[1], color=color, alpha=alpha)\n plt.fill_between(df[x],\n df['AverageEpRet']-width/2*df['StdEpRet'],\n df['AverageEpRet']+width/2*df['StdEpRet'],\n alpha=0.3, color=color)\n\n return df\n\ndef combine_tracker(folder):\n '''\n Merge all buffers\n '''\n trackers = []\n \n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n 
combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb'))) \n\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n \n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))\n\n return combined_tracker\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n \n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n\n params = {'size':14}\n matplotlib.rc('font', **params)\n\n fig, ax = plt.subplots(2,1, figsize=(10,8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp+'_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder+'/*')]\n\n def read_hyper(file_name, rep=10):\n\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep\n else:\n hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep\n else: \n hypers_dict[k] = [v] * rep\n \n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df \n\n first=True # first pandas file to load\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder+'/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\\t'))\n for run in runs:\n\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0) \n\n return df \n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n '''\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned 
thickness list\n '''\n \n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])\n return 1-cal_reward(R, T, A, target)\n \n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n \n res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})\n x_opt = [int(item) for item in res.x]\n \n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)\n \n return x_opt, res\n", "step-ids": [ 11, 16, 20, 22, 26 ] }
[ 11, 16, 20, 22, 26 ]
# import time module, Observer, FileSystemEventHandler
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import pyAesCrypt
import logging
logging.basicConfig(filename="Decryptor.log",
                    level=logging.INFO, format="%(asctime)s:%(filename)s:%(lineno)d:%(message)s")

desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
decryptor_path = desktop+"\\aes_decryptor"

if not os.path.exists(decryptor_path):
    os.mkdir(decryptor_path)


class OnMyWatch:
    # Set the directory on watch
    watchDirectory = decryptor_path

    def __init__(self):
        self.observer = Observer()

    def run(self):
        event_handler = Handler()
        self.observer.schedule(
            event_handler, self.watchDirectory, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except:
            self.observer.stop()

        self.observer.join()


class Handler(FileSystemEventHandler):
    @staticmethod
    def on_any_event(event):
        if event.is_directory:
            return None
        elif (event.event_type == 'created' or event.event_type == 'modified') and event.event_type != 'deleted':
            # Event is modified, you can process it now
            logging.info(f"Watchdog received modified event - {event.src_path}")
            srcPath = event.src_path
            if srcPath.find(".aes") != -1:
                decrptor(srcPath)
            else:
                pass
        else:
            pass


def decrptor(srcPath):
    bufferSize = 64 * 1024
    password = "js198989"
    try:
        infile = srcPath
        outfile = srcPath.replace('.aes', '')
        pwd = password
        buffSize = bufferSize
        pyAesCrypt.decryptFile(infile, outfile, pwd, buffSize)
        os.remove(infile)
        return True
    except Exception as ex:
        logging.exception(f"ERROR-MESSAGE")
        pass


if __name__ == '__main__':
    logging.info("Decryptor Started Working...")
    watch = OnMyWatch()
    watch.run()
normal
{ "blob_id": "6261d06ac7bdcb3ae25cd06338c4c41c3c5f5023", "index": 7615, "step-1": "<mask token>\n\n\nclass OnMyWatch:\n <mask token>\n <mask token>\n\n def run(self):\n event_handler = Handler()\n self.observer.schedule(event_handler, self.watchDirectory,\n recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n\n @staticmethod\n def on_any_event(event):\n if event.is_directory:\n return None\n elif (event.event_type == 'created' or event.event_type == 'modified'\n ) and event.event_type != 'deleted':\n logging.info(f'Watchdog received modified event - {event.src_path}'\n )\n srcPath = event.src_path\n if srcPath.find('.aes') != -1:\n decrptor(srcPath)\n else:\n pass\n else:\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass OnMyWatch:\n watchDirectory = decryptor_path\n\n def __init__(self):\n self.observer = Observer()\n\n def run(self):\n event_handler = Handler()\n self.observer.schedule(event_handler, self.watchDirectory,\n recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n\n @staticmethod\n def on_any_event(event):\n if event.is_directory:\n return None\n elif (event.event_type == 'created' or event.event_type == 'modified'\n ) and event.event_type != 'deleted':\n logging.info(f'Watchdog received modified event - {event.src_path}'\n )\n srcPath = event.src_path\n if srcPath.find('.aes') != -1:\n decrptor(srcPath)\n else:\n pass\n else:\n pass\n\n\ndef decrptor(srcPath):\n bufferSize = 64 * 1024\n password = 'js198989'\n try:\n infile = srcPath\n outfile = srcPath.replace('.aes', '')\n pwd = password\n buffSize = bufferSize\n pyAesCrypt.decryptFile(infile, outfile, pwd, buffSize)\n os.remove(infile)\n return True\n except Exception as ex:\n logging.exception(f'ERROR-MESSAGE')\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\nlogging.basicConfig(filename='Decryptor.log', level=logging.INFO, format=\n '%(asctime)s:%(filename)s:%(lineno)d:%(message)s')\n<mask token>\nif not os.path.exists(decryptor_path):\n os.mkdir(decryptor_path)\n\n\nclass OnMyWatch:\n watchDirectory = decryptor_path\n\n def __init__(self):\n self.observer = Observer()\n\n def run(self):\n event_handler = Handler()\n self.observer.schedule(event_handler, self.watchDirectory,\n recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n\n @staticmethod\n def on_any_event(event):\n if event.is_directory:\n return None\n elif (event.event_type == 'created' or event.event_type == 'modified'\n ) and event.event_type != 'deleted':\n logging.info(f'Watchdog received modified event - {event.src_path}'\n )\n srcPath = event.src_path\n if srcPath.find('.aes') != -1:\n decrptor(srcPath)\n else:\n pass\n else:\n pass\n\n\ndef decrptor(srcPath):\n bufferSize = 64 * 1024\n password = 'js198989'\n try:\n infile = srcPath\n outfile = srcPath.replace('.aes', '')\n pwd = password\n buffSize = bufferSize\n pyAesCrypt.decryptFile(infile, outfile, pwd, buffSize)\n os.remove(infile)\n return True\n except Exception as ex:\n logging.exception(f'ERROR-MESSAGE')\n pass\n\n\nif __name__ == '__main__':\n logging.info('Decryptor Started Working...')\n watch = OnMyWatch()\n watch.run()\n", "step-4": "import os\nimport time\nfrom watchdog.observers import 
Observer\nfrom watchdog.events import FileSystemEventHandler\nimport pyAesCrypt\nimport logging\nlogging.basicConfig(filename='Decryptor.log', level=logging.INFO, format=\n '%(asctime)s:%(filename)s:%(lineno)d:%(message)s')\ndesktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\ndecryptor_path = desktop + '\\\\aes_decryptor'\nif not os.path.exists(decryptor_path):\n os.mkdir(decryptor_path)\n\n\nclass OnMyWatch:\n watchDirectory = decryptor_path\n\n def __init__(self):\n self.observer = Observer()\n\n def run(self):\n event_handler = Handler()\n self.observer.schedule(event_handler, self.watchDirectory,\n recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n\n @staticmethod\n def on_any_event(event):\n if event.is_directory:\n return None\n elif (event.event_type == 'created' or event.event_type == 'modified'\n ) and event.event_type != 'deleted':\n logging.info(f'Watchdog received modified event - {event.src_path}'\n )\n srcPath = event.src_path\n if srcPath.find('.aes') != -1:\n decrptor(srcPath)\n else:\n pass\n else:\n pass\n\n\ndef decrptor(srcPath):\n bufferSize = 64 * 1024\n password = 'js198989'\n try:\n infile = srcPath\n outfile = srcPath.replace('.aes', '')\n pwd = password\n buffSize = bufferSize\n pyAesCrypt.decryptFile(infile, outfile, pwd, buffSize)\n os.remove(infile)\n return True\n except Exception as ex:\n logging.exception(f'ERROR-MESSAGE')\n pass\n\n\nif __name__ == '__main__':\n logging.info('Decryptor Started Working...')\n watch = OnMyWatch()\n watch.run()\n", "step-5": "# import time module, Observer, FileSystemEventHandler\nimport os\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nimport pyAesCrypt\nimport logging\nlogging.basicConfig(filename=\"Decryptor.log\",\n level=logging.INFO, format=\"%(asctime)s:%(filename)s:%(lineno)d:%(message)s\")\n\ndesktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\ndecryptor_path = desktop+\"\\\\aes_decryptor\"\n\nif not os.path.exists(decryptor_path):\n os.mkdir(decryptor_path)\n\n\nclass OnMyWatch:\n # Set the directory on watch\n watchDirectory = decryptor_path\n\n def __init__(self):\n self.observer = Observer()\n\n def run(self):\n event_handler = Handler()\n self.observer.schedule(\n event_handler, self.watchDirectory, recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n @staticmethod\n def on_any_event(event):\n if event.is_directory:\n return None\n elif (event.event_type == 'created' or event.event_type == 'modified') and event.event_type != 'deleted':\n # Event is modified, you can process it now\n logging.info(f\"Watchdog received modified event - {event.src_path}\")\n srcPath = event.src_path\n if srcPath.find(\".aes\") != -1:\n decrptor(srcPath)\n else:\n pass\n else:\n pass\n \n \ndef decrptor(srcPath):\n bufferSize = 64 * 1024\n password = \"js198989\"\n try:\n infile = srcPath\n outfile = srcPath.replace('.aes', '')\n pwd = password\n buffSize = bufferSize\n pyAesCrypt.decryptFile(infile, outfile, pwd, buffSize)\n os.remove(infile)\n return True\n except Exception as ex:\n logging.exception(f\"ERROR-MESSAGE\")\n pass\n\n\nif __name__ == '__main__':\n logging.info(\"Decryptor Started Working...\")\n watch = OnMyWatch()\n watch.run()", "step-ids": [ 4, 7, 8, 10, 11 ] }
[ 4, 7, 8, 10, 11 ]
# -*- coding: utf-8 -
#
# This file is part of gaffer. See the NOTICE for more information.

import os

from .base import Command
from ...httpclient import Server


class Load(Command):
    """\
    Load a Procfile application to gafferd
    ======================================

    This command allows you to load your Procfile application
    in gafferd.

    Command line
    ------------

    $ gaffer load [name] [url]

    Arguments
    +++++++++

    *name* is the name of the group of process recoreded in gafferd.
    By default it will be the name of your project folder.You can use
    ``.`` to specify the current folder.

    *uri* is the url to connect to a gaffer node. By default
    'http://127.0.0.1:5000'

    Options
    +++++++

    **--endpoint**

    Gaffer node URL to connect.

    """

    name = "load"

    def run(self, procfile, pargs):
        args = pargs.args

        # get args
        uri = None
        if len(args) == 2:
            group = args[0]
            uri = args[1]
        elif len(args) == 1:
            group = args[0]
        else:
            group = "."

        if pargs.endpoint:
            uri = pargs.endpoint

        if not uri:
            uri = "http://127.0.0.1:5000"

        # get the default groupname
        if group == ".":
            group = procfile.get_groupname()

        # create a server instance
        s = Server(uri)

        # finally manage group conflicts
        group = self.find_groupname(group, s)

        # parse the concurrency settings
        concurrency = self.parse_concurrency(pargs)

        # finally send the processes
        for name, cmd_str in procfile.processes():
            cmd, args = procfile.parse_cmd(cmd_str)
            pname = "%s:%s" % (group, name)
            params = dict(args=args, env=procfile.env,
                          numprocesses=concurrency.get(name, 1),
                          redirect_output=['out', 'err'],
                          cwd=os.path.abspath(procfile.root))
            s.add_process(pname, cmd, **params)
        print("%r has been loaded in %s" % (group, uri))

    def find_groupname(self, g, s):
        tries = 0
        while True:
            groups = s.groups()
            if g not in groups:
                return g

            if tries > 3:
                raise RuntimeError("%r is conflicting, try to pass a new one")

            i = 0
            while True:
                g = "%s.%s" % (g, i)
                if g not in groups:
                    break
            tries += 1
normal
{ "blob_id": "eb5256543d6095668d6eeaf6cfdc9f744d7c73c5", "index": 2267, "step-1": "<mask token>\n\n\nclass Load(Command):\n <mask token>\n <mask token>\n <mask token>\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n", "step-2": "<mask token>\n\n\nclass Load(Command):\n <mask token>\n <mask token>\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n", "step-3": "<mask token>\n\n\nclass Load(Command):\n \"\"\" Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. 
By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n name = 'load'\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n", "step-4": "import os\nfrom .base import Command\nfrom ...httpclient import Server\n\n\nclass Load(Command):\n \"\"\" Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n name = 'load'\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n", "step-5": "# -*- coding: utf-8 -\n#\n# This file is part of gaffer. 
See the NOTICE for more information.\n\nimport os\n\nfrom .base import Command\nfrom ...httpclient import Server\n\nclass Load(Command):\n \"\"\"\\\n Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n\n name = \"load\"\n\n def run(self, procfile, pargs):\n args = pargs.args\n\n # get args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = \".\"\n\n if pargs.endpoint:\n uri = pargs.endpoint\n\n if not uri:\n uri = \"http://127.0.0.1:5000\"\n\n # get the default groupname\n if group == \".\":\n group = procfile.get_groupname()\n\n # create a server instance\n s = Server(uri)\n\n # finally manage group conflicts\n group = self.find_groupname(group, s)\n\n # parse the concurrency settings\n concurrency = self.parse_concurrency(pargs)\n\n # finally send the processes\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n\n pname = \"%s:%s\" % (group, name)\n params = dict(args=args, env=procfile.env,\n numprocesses=concurrency.get(name, 1),\n redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print(\"%r has been loaded in %s\" % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n\n if tries > 3:\n raise RuntimeError(\"%r is conflicting, try to pass a new one\")\n\n i = 0\n while True:\n g = \"%s.%s\" % (g, i)\n if g not in groups:\n break\n tries += 1\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
# (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ipyhi ipyhi is a Jupyter notebook notification system. It is based on the jupyter-notify package. """ import os from setuptools import find_packages, setup MAJOR = 0 MINOR = 1 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) FULLVERSION = VERSION DOCLINES = __doc__.split('\n') DESCRIPTION = DOCLINES[0] LONG_DESCRIPTION = "\n".join(DOCLINES[2:]) def git_short_hash(): try: git_str = "+" + os.popen('git log -1 --format="%h"').read().strip() except: # pylint: disable=bare-except git_str = "" else: if git_str == '+': #fixes setuptools PEP issues with versioning git_str = '' return git_str if not ISRELEASED: FULLVERSION += '.dev'+str(MICRO)+git_short_hash() def write_version_py(filename='ipyhi/version.py'): cnt = """\ # THIS FILE IS GENERATED FROM IPYHI SETUP.PY # pylint: disable=missing-module-docstring short_version = '%(version)s' version = '%(fullversion)s' release = %(isrelease)s """ a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'fullversion': FULLVERSION, 'isrelease': str(ISRELEASED)}) finally: a.close() setup( name='ipyhi', version=VERSION, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author='Paul Nation', author_email='[email protected]', url='https://github.com/nonhermitian/ipyhi', license='Apache-2', packages=find_packages(exclude=('tests', 'docs')), package_data={'ipyhi': ['js/*.js']}, install_requires=[ 'ipython', 'jupyter', 'ipywidgets' ], classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9' ] )
normal
{ "blob_id": "2e2de50a7d366ca1a98d29b33ed157a1e8445ada", "index": 3523, "step-1": "<mask token>\n\n\ndef git_short_hash():\n try:\n git_str = '+' + os.popen('git log -1 --format=\"%h\"').read().strip()\n except:\n git_str = ''\n else:\n if git_str == '+':\n git_str = ''\n return git_str\n\n\n<mask token>\n\n\ndef write_version_py(filename='ipyhi/version.py'):\n cnt = \"\"\"# THIS FILE IS GENERATED FROM IPYHI SETUP.PY\n# pylint: disable=missing-module-docstring\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion': FULLVERSION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef git_short_hash():\n try:\n git_str = '+' + os.popen('git log -1 --format=\"%h\"').read().strip()\n except:\n git_str = ''\n else:\n if git_str == '+':\n git_str = ''\n return git_str\n\n\nif not ISRELEASED:\n FULLVERSION += '.dev' + str(MICRO) + git_short_hash()\n\n\ndef write_version_py(filename='ipyhi/version.py'):\n cnt = \"\"\"# THIS FILE IS GENERATED FROM IPYHI SETUP.PY\n# pylint: disable=missing-module-docstring\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion': FULLVERSION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\nsetup(name='ipyhi', version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, author='Paul Nation', author_email=\n '[email protected]', url='https://github.com/nonhermitian/ipyhi',\n license='Apache-2', packages=find_packages(exclude=('tests', 'docs')),\n package_data={'ipyhi': ['js/*.js']}, install_requires=['ipython',\n 'jupyter', 'ipywidgets'], classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'])\n", "step-3": "<mask token>\nMAJOR = 0\nMINOR = 1\nMICRO = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nFULLVERSION = VERSION\nDOCLINES = __doc__.split('\\n')\nDESCRIPTION = DOCLINES[0]\nLONG_DESCRIPTION = '\\n'.join(DOCLINES[2:])\n\n\ndef git_short_hash():\n try:\n git_str = '+' + os.popen('git log -1 --format=\"%h\"').read().strip()\n except:\n git_str = ''\n else:\n if git_str == '+':\n git_str = ''\n return git_str\n\n\nif not ISRELEASED:\n FULLVERSION += '.dev' + str(MICRO) + git_short_hash()\n\n\ndef write_version_py(filename='ipyhi/version.py'):\n cnt = \"\"\"# THIS FILE IS GENERATED FROM IPYHI SETUP.PY\n# pylint: disable=missing-module-docstring\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion': FULLVERSION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\nsetup(name='ipyhi', version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, author='Paul Nation', author_email=\n '[email protected]', url='https://github.com/nonhermitian/ipyhi',\n license='Apache-2', packages=find_packages(exclude=('tests', 'docs')),\n package_data={'ipyhi': ['js/*.js']}, install_requires=['ipython',\n 'jupyter', 'ipywidgets'], classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'])\n", "step-4": "<mask token>\nimport os\nfrom setuptools import find_packages, setup\nMAJOR = 0\nMINOR = 1\nMICRO = 0\nISRELEASED = 
False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nFULLVERSION = VERSION\nDOCLINES = __doc__.split('\\n')\nDESCRIPTION = DOCLINES[0]\nLONG_DESCRIPTION = '\\n'.join(DOCLINES[2:])\n\n\ndef git_short_hash():\n try:\n git_str = '+' + os.popen('git log -1 --format=\"%h\"').read().strip()\n except:\n git_str = ''\n else:\n if git_str == '+':\n git_str = ''\n return git_str\n\n\nif not ISRELEASED:\n FULLVERSION += '.dev' + str(MICRO) + git_short_hash()\n\n\ndef write_version_py(filename='ipyhi/version.py'):\n cnt = \"\"\"# THIS FILE IS GENERATED FROM IPYHI SETUP.PY\n# pylint: disable=missing-module-docstring\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion': FULLVERSION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\nsetup(name='ipyhi', version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, author='Paul Nation', author_email=\n '[email protected]', url='https://github.com/nonhermitian/ipyhi',\n license='Apache-2', packages=find_packages(exclude=('tests', 'docs')),\n package_data={'ipyhi': ['js/*.js']}, install_requires=['ipython',\n 'jupyter', 'ipywidgets'], classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'])\n", "step-5": "# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"ipyhi\n\nipyhi is a Jupyter notebook notification system.\nIt is based on the jupyter-notify package.\n\"\"\"\nimport os\nfrom setuptools import find_packages, setup\n\nMAJOR = 0\nMINOR = 1\nMICRO = 0\n\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nFULLVERSION = VERSION\n\nDOCLINES = __doc__.split('\\n')\nDESCRIPTION = DOCLINES[0]\nLONG_DESCRIPTION = \"\\n\".join(DOCLINES[2:])\n\ndef git_short_hash():\n try:\n git_str = \"+\" + os.popen('git log -1 --format=\"%h\"').read().strip()\n except: # pylint: disable=bare-except\n git_str = \"\"\n else:\n if git_str == '+': #fixes setuptools PEP issues with versioning\n git_str = ''\n return git_str\n\nif not ISRELEASED:\n FULLVERSION += '.dev'+str(MICRO)+git_short_hash()\n\ndef write_version_py(filename='ipyhi/version.py'):\n cnt = \"\"\"\\\n# THIS FILE IS GENERATED FROM IPYHI SETUP.PY\n# pylint: disable=missing-module-docstring\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion':\n FULLVERSION, 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\nsetup(\n name='ipyhi',\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author='Paul Nation',\n author_email='[email protected]',\n url='https://github.com/nonhermitian/ipyhi',\n license='Apache-2',\n packages=find_packages(exclude=('tests', 'docs')),\n package_data={'ipyhi': ['js/*.js']},\n install_requires=[\n 'ipython',\n 'jupyter',\n 'ipywidgets'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'\n 
]\n)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import logging from abc import ABC from thraxisgamespatterns.application.handler_map_factory import TGHandlerMapFactory from thraxisgamespatterns.eventhandling.event_distributor import TGEventDistributor from thraxisgamespatterns.factories.logging_rule_engine_factory import TGLoggingRuleEngineFactory class TGAbstractRegistry(ABC): def __init__(self): self.rule_engine = TGLoggingRuleEngineFactory().create() self.logger = logging.getLogger() self.event_distributor = TGEventDistributor(logging.getLogger()) self.handler_map_factory = TGHandlerMapFactory().create()
normal
{ "blob_id": "d499b4e189a0c3c6efa6a07871dbc6c2996a2dcb", "index": 2245, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass TGAbstractRegistry(ABC):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass TGAbstractRegistry(ABC):\n\n def __init__(self):\n self.rule_engine = TGLoggingRuleEngineFactory().create()\n self.logger = logging.getLogger()\n self.event_distributor = TGEventDistributor(logging.getLogger())\n self.handler_map_factory = TGHandlerMapFactory().create()\n", "step-4": "import logging\nfrom abc import ABC\nfrom thraxisgamespatterns.application.handler_map_factory import TGHandlerMapFactory\nfrom thraxisgamespatterns.eventhandling.event_distributor import TGEventDistributor\nfrom thraxisgamespatterns.factories.logging_rule_engine_factory import TGLoggingRuleEngineFactory\n\n\nclass TGAbstractRegistry(ABC):\n\n def __init__(self):\n self.rule_engine = TGLoggingRuleEngineFactory().create()\n self.logger = logging.getLogger()\n self.event_distributor = TGEventDistributor(logging.getLogger())\n self.handler_map_factory = TGHandlerMapFactory().create()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Question link: https://www.hackerrank.com/challenges/30-scope/problem
# Code section: method to be added to the class provided by the problem,
# which stores the input list in self.__elements.

    def computeDifference(self):
        # Track the largest absolute difference between any two elements.
        self.maximumDifference = -111111  # sentinel lower than any possible difference
        for i in range(len(self.__elements) - 1):
            for j in range(i + 1, len(self.__elements)):
                diff = abs(self.__elements[i] - self.__elements[j])
                self.maximumDifference = max(diff, self.maximumDifference)
normal
{ "blob_id": "eb90912d09fca52a43b28ec4c988e3658ddfc219", "index": 605, "step-1": "# Question link: https://www.hackerrank.com/challenges/30-scope/problem\n# Code section:\n\n def computeDifference(self):\n # Add your code here\n self.maximumDifference = -111111\n for i in range(0,len(self.__elements)-1):\n for j in range(i+1, len(self.__elements)):\n diff = abs(self.__elements[i]-self.__elements[j])\n self.maximumDifference = max(diff, self.maximumDifference)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import stats import datetime #takes in a sorted data frame holding the actuals, the predicted values (sorted descending) and the percentile of each obsevation #and returns a new dataframe with all of the appropriate calculations def lift_calculations(df): #adding a sample counter variable df['sample_num'] = range(len(df)) #adding cumulative sum of actual target df['actual_sum'] = df['actual'].cumsum() #column tracking the percentage of samples of total covered df['per_sample_covered'] = ((df['sample_num']+1)*100)/len(df) #percentage of all positives captured df['per_pos_captured'] = (df['actual_sum']/(len(df[df['actual']==1])))*100 #proportion of positives captured from total df['prop_pos_captured_from_all'] = df['per_pos_captured']/df['per_sample_covered'] return df #creates a plot of cumulative positive gain #takes in a dataframe with all of the relevant statistics already calculated def gain_plot(df,figsize=None,x_range=None,y_range=None,legend='on'): #Plot of the cumulative capture of positives as we go across the deciles if figsize: fig = plt.figure(figsize=figsize) plt.plot(pd.Series([0]).append(df['per_sample_covered']), pd.Series([0]).append(df['per_pos_captured'])) #pre-pending zeos to the front of the series for polotting purposes plt.plot([0,100],[0,100]) plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',fontsize=20) plt.xlabel('% of Sample Covered',fontsize=15) plt.ylabel('% of True Positives Captured',fontsize=15) if x_range: plt.xlim(x_range[0],x_range[1]) if y_range: plt.ylim(y_range[0],y_range[1]) if legend=='on': plt.legend(['Predictive Targeting','Random Targeting'],fontsize=12,loc=2) def lift_plot(df,figsize=None,x_range=None,y_range=None,legend='on'): #Lift Curve Plot(at whatever percent of customers cutoff, the model is targeting X times better than random) #i.e. 
at the XX percentile, the response rate is Y times as good as it would be if targeting at random at the XX percentile if figsize: fig = plt.figure(figsize=figsize) plt.plot(df['per_sample_covered'],df['prop_pos_captured_from_all']) plt.plot([df['per_sample_covered'].min(),100],[1,1]) plt.title('Lift Curve',fontsize=20) plt.xlabel('% of Customers',fontsize=15) plt.ylabel('Lift',fontsize=15) if x_range: plt.xlim(x_range[0],x_range[1]) if y_range: plt.ylim(y_range[0],y_range[1]) if legend=='on': plt.legend(['Predictive Targeting','Random Targeting'],fontsize=12) #a function which takes in an array of predicted values and returns the percentile associated with each one def percentile_gen(arr_y_pred): return np.array(pd.qcut(pd.Series(arr_y_pred).rank(method='first'),100,labels=range(1,101))) #method = first is used in the case when there are a lot of 0s and overlapping of labels #a function which takes in an array of actual test values and the model predicted values and stacks them together #then sorts them and puts them into a dataframe def data_prep(arr_y_test,arr_y_pred): #assigning each observation into a percentile percentiles = percentile_gen(arr_y_pred) #print(percentiles.shape) #joining all the pieces together data = np.hstack((arr_y_test.reshape((len(arr_y_test),1)), arr_y_pred.reshape((len(arr_y_pred),1)), percentiles.reshape((len(percentiles),1)))) #converting to a data frame data_df = pd.DataFrame(data) data_df.columns = ['actual','prob','percentile'] data_df.actual = data_df.actual.astype(int) data_df.prob = data_df.prob.astype('float64') #sorting by the probability data_df = data_df.sort_values(by='prob',ascending=False) #calculating lift metrics data_df = lift_calculations(data_df) return data_df #a function which plots the lift curve for the model def lift_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'): data_df = data_prep(arr_y_test,arr_y_pred) #print(data_df.groupby('percentile').size()) #lift curve plot lift_plot(data_df,figsize=figsize,x_range=x_range,y_range=y_range,legend=legend) plt.show() #a function which plots the gain curve for the model def gain_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'): data_df = data_prep(arr_y_test,arr_y_pred) #gain curve plot gain_plot(data_df,figsize=figsize,x_range=x_range,y_range=y_range,legend=legend) plt.show() #a function which returns two numpy arrays: #the first one is the percent of samples covered (X-value) #the second being the lift values for the correponding the sample (Y-value) def lift_values_generator(arr_y_test,arr_y_pred): data_df = data_prep(arr_y_test,arr_y_pred) return data_df.per_sample_covered, data_df.prop_pos_captured_from_all #a function which plots multiple lift curves all on the same graph #the first parameter is the x axis which represents %of the sample covered #the second parameter is a list of lists, where each one presents the lift #curve for a particular model, the last parameter holds the labels for the lift #curves in the corresponding order def plot_lift_curves(percent_sample_covered,list_of_lift_metrics,labels,figsize=None,x_range=None,y_range=None,legend='on'): if figsize: plt.figure(figsize=figsize) #plotting the various model lift curves for i,lift_scores in enumerate(list_of_lift_metrics): plt.plot(percent_sample_covered,lift_scores) #base line plot for random guessing plt.plot([percent_sample_covered.min(),100],[1,1]) #formats and labels plt.title('Lift Curves Comparison',fontsize=20) plt.xlabel('% of Customers',fontsize=15) 
plt.ylabel('Lift',fontsize=15) if x_range: plt.xlim(x_range[0],x_range[1]) if y_range: plt.ylim(y_range[0],y_range[1]) model_labels = labels + ['Random Guessing'] if legend == 'on': plt.legend(model_labels,fontsize=12,loc='best')
normal
{ "blob_id": "8e71ea23d04199e8fb54099c404c5a4e9af6c4b1", "index": 9336, "step-1": "<mask token>\n\n\ndef lift_calculations(df):\n df['sample_num'] = range(len(df))\n df['actual_sum'] = df['actual'].cumsum()\n df['per_sample_covered'] = (df['sample_num'] + 1) * 100 / len(df)\n df['per_pos_captured'] = df['actual_sum'] / len(df[df['actual'] == 1]\n ) * 100\n df['prop_pos_captured_from_all'] = df['per_pos_captured'] / df[\n 'per_sample_covered']\n return df\n\n\ndef gain_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(pd.Series([0]).append(df['per_sample_covered']), pd.Series([0]\n ).append(df['per_pos_captured']))\n plt.plot([0, 100], [0, 100])\n plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',\n fontsize=20)\n plt.xlabel('% of Sample Covered', fontsize=15)\n plt.ylabel('% of True Positives Captured', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=\n 12, loc=2)\n\n\ndef lift_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(df['per_sample_covered'], df['prop_pos_captured_from_all'])\n plt.plot([df['per_sample_covered'].min(), 100], [1, 1])\n plt.title('Lift Curve', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=12)\n\n\n<mask token>\n\n\ndef data_prep(arr_y_test, arr_y_pred):\n percentiles = percentile_gen(arr_y_pred)\n data = np.hstack((arr_y_test.reshape((len(arr_y_test), 1)), arr_y_pred.\n reshape((len(arr_y_pred), 1)), percentiles.reshape((len(percentiles\n ), 1))))\n data_df = pd.DataFrame(data)\n data_df.columns = ['actual', 'prob', 'percentile']\n data_df.actual = data_df.actual.astype(int)\n data_df.prob = data_df.prob.astype('float64')\n data_df = data_df.sort_values(by='prob', ascending=False)\n data_df = lift_calculations(data_df)\n return data_df\n\n\ndef lift_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n lift_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef lift_calculations(df):\n df['sample_num'] = range(len(df))\n df['actual_sum'] = df['actual'].cumsum()\n df['per_sample_covered'] = (df['sample_num'] + 1) * 100 / len(df)\n df['per_pos_captured'] = df['actual_sum'] / len(df[df['actual'] == 1]\n ) * 100\n df['prop_pos_captured_from_all'] = df['per_pos_captured'] / df[\n 'per_sample_covered']\n return df\n\n\ndef gain_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(pd.Series([0]).append(df['per_sample_covered']), pd.Series([0]\n ).append(df['per_pos_captured']))\n plt.plot([0, 100], [0, 100])\n plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',\n fontsize=20)\n plt.xlabel('% of Sample Covered', fontsize=15)\n plt.ylabel('% of True Positives Captured', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], 
fontsize=\n 12, loc=2)\n\n\ndef lift_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(df['per_sample_covered'], df['prop_pos_captured_from_all'])\n plt.plot([df['per_sample_covered'].min(), 100], [1, 1])\n plt.title('Lift Curve', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=12)\n\n\n<mask token>\n\n\ndef data_prep(arr_y_test, arr_y_pred):\n percentiles = percentile_gen(arr_y_pred)\n data = np.hstack((arr_y_test.reshape((len(arr_y_test), 1)), arr_y_pred.\n reshape((len(arr_y_pred), 1)), percentiles.reshape((len(percentiles\n ), 1))))\n data_df = pd.DataFrame(data)\n data_df.columns = ['actual', 'prob', 'percentile']\n data_df.actual = data_df.actual.astype(int)\n data_df.prob = data_df.prob.astype('float64')\n data_df = data_df.sort_values(by='prob', ascending=False)\n data_df = lift_calculations(data_df)\n return data_df\n\n\ndef lift_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n lift_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\ndef gain_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n gain_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\n<mask token>\n\n\ndef plot_lift_curves(percent_sample_covered, list_of_lift_metrics, labels,\n figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n plt.figure(figsize=figsize)\n for i, lift_scores in enumerate(list_of_lift_metrics):\n plt.plot(percent_sample_covered, lift_scores)\n plt.plot([percent_sample_covered.min(), 100], [1, 1])\n plt.title('Lift Curves Comparison', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n model_labels = labels + ['Random Guessing']\n if legend == 'on':\n plt.legend(model_labels, fontsize=12, loc='best')\n", "step-3": "<mask token>\n\n\ndef lift_calculations(df):\n df['sample_num'] = range(len(df))\n df['actual_sum'] = df['actual'].cumsum()\n df['per_sample_covered'] = (df['sample_num'] + 1) * 100 / len(df)\n df['per_pos_captured'] = df['actual_sum'] / len(df[df['actual'] == 1]\n ) * 100\n df['prop_pos_captured_from_all'] = df['per_pos_captured'] / df[\n 'per_sample_covered']\n return df\n\n\ndef gain_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(pd.Series([0]).append(df['per_sample_covered']), pd.Series([0]\n ).append(df['per_pos_captured']))\n plt.plot([0, 100], [0, 100])\n plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',\n fontsize=20)\n plt.xlabel('% of Sample Covered', fontsize=15)\n plt.ylabel('% of True Positives Captured', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=\n 12, loc=2)\n\n\ndef lift_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n 
plt.plot(df['per_sample_covered'], df['prop_pos_captured_from_all'])\n plt.plot([df['per_sample_covered'].min(), 100], [1, 1])\n plt.title('Lift Curve', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=12)\n\n\ndef percentile_gen(arr_y_pred):\n return np.array(pd.qcut(pd.Series(arr_y_pred).rank(method='first'), 100,\n labels=range(1, 101)))\n\n\ndef data_prep(arr_y_test, arr_y_pred):\n percentiles = percentile_gen(arr_y_pred)\n data = np.hstack((arr_y_test.reshape((len(arr_y_test), 1)), arr_y_pred.\n reshape((len(arr_y_pred), 1)), percentiles.reshape((len(percentiles\n ), 1))))\n data_df = pd.DataFrame(data)\n data_df.columns = ['actual', 'prob', 'percentile']\n data_df.actual = data_df.actual.astype(int)\n data_df.prob = data_df.prob.astype('float64')\n data_df = data_df.sort_values(by='prob', ascending=False)\n data_df = lift_calculations(data_df)\n return data_df\n\n\ndef lift_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n lift_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\ndef gain_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n gain_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\n<mask token>\n\n\ndef plot_lift_curves(percent_sample_covered, list_of_lift_metrics, labels,\n figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n plt.figure(figsize=figsize)\n for i, lift_scores in enumerate(list_of_lift_metrics):\n plt.plot(percent_sample_covered, lift_scores)\n plt.plot([percent_sample_covered.min(), 100], [1, 1])\n plt.title('Lift Curves Comparison', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n model_labels = labels + ['Random Guessing']\n if legend == 'on':\n plt.legend(model_labels, fontsize=12, loc='best')\n", "step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport datetime\n\n\ndef lift_calculations(df):\n df['sample_num'] = range(len(df))\n df['actual_sum'] = df['actual'].cumsum()\n df['per_sample_covered'] = (df['sample_num'] + 1) * 100 / len(df)\n df['per_pos_captured'] = df['actual_sum'] / len(df[df['actual'] == 1]\n ) * 100\n df['prop_pos_captured_from_all'] = df['per_pos_captured'] / df[\n 'per_sample_covered']\n return df\n\n\ndef gain_plot(df, figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(pd.Series([0]).append(df['per_sample_covered']), pd.Series([0]\n ).append(df['per_pos_captured']))\n plt.plot([0, 100], [0, 100])\n plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',\n fontsize=20)\n plt.xlabel('% of Sample Covered', fontsize=15)\n plt.ylabel('% of True Positives Captured', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=\n 12, loc=2)\n\n\ndef lift_plot(df, figsize=None, x_range=None, y_range=None, 
legend='on'):\n if figsize:\n fig = plt.figure(figsize=figsize)\n plt.plot(df['per_sample_covered'], df['prop_pos_captured_from_all'])\n plt.plot([df['per_sample_covered'].min(), 100], [1, 1])\n plt.title('Lift Curve', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n if legend == 'on':\n plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=12)\n\n\ndef percentile_gen(arr_y_pred):\n return np.array(pd.qcut(pd.Series(arr_y_pred).rank(method='first'), 100,\n labels=range(1, 101)))\n\n\ndef data_prep(arr_y_test, arr_y_pred):\n percentiles = percentile_gen(arr_y_pred)\n data = np.hstack((arr_y_test.reshape((len(arr_y_test), 1)), arr_y_pred.\n reshape((len(arr_y_pred), 1)), percentiles.reshape((len(percentiles\n ), 1))))\n data_df = pd.DataFrame(data)\n data_df.columns = ['actual', 'prob', 'percentile']\n data_df.actual = data_df.actual.astype(int)\n data_df.prob = data_df.prob.astype('float64')\n data_df = data_df.sort_values(by='prob', ascending=False)\n data_df = lift_calculations(data_df)\n return data_df\n\n\ndef lift_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n lift_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\ndef gain_curve(arr_y_test, arr_y_pred, figsize=None, x_range=None, y_range=\n None, legend='on'):\n data_df = data_prep(arr_y_test, arr_y_pred)\n gain_plot(data_df, figsize=figsize, x_range=x_range, y_range=y_range,\n legend=legend)\n plt.show()\n\n\ndef lift_values_generator(arr_y_test, arr_y_pred):\n data_df = data_prep(arr_y_test, arr_y_pred)\n return data_df.per_sample_covered, data_df.prop_pos_captured_from_all\n\n\ndef plot_lift_curves(percent_sample_covered, list_of_lift_metrics, labels,\n figsize=None, x_range=None, y_range=None, legend='on'):\n if figsize:\n plt.figure(figsize=figsize)\n for i, lift_scores in enumerate(list_of_lift_metrics):\n plt.plot(percent_sample_covered, lift_scores)\n plt.plot([percent_sample_covered.min(), 100], [1, 1])\n plt.title('Lift Curves Comparison', fontsize=20)\n plt.xlabel('% of Customers', fontsize=15)\n plt.ylabel('Lift', fontsize=15)\n if x_range:\n plt.xlim(x_range[0], x_range[1])\n if y_range:\n plt.ylim(y_range[0], y_range[1])\n model_labels = labels + ['Random Guessing']\n if legend == 'on':\n plt.legend(model_labels, fontsize=12, loc='best')\n", "step-5": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\nimport datetime\r\n\r\n#takes in a sorted data frame holding the actuals, the predicted values (sorted descending) and the percentile of each obsevation\r\n#and returns a new dataframe with all of the appropriate calculations\r\ndef lift_calculations(df):\r\n\r\n\t#adding a sample counter variable\r\n df['sample_num'] = range(len(df)) \r\n \r\n #adding cumulative sum of actual target\r\n df['actual_sum'] = df['actual'].cumsum()\r\n \r\n #column tracking the percentage of samples of total covered\r\n df['per_sample_covered'] = ((df['sample_num']+1)*100)/len(df)\r\n \r\n #percentage of all positives captured\r\n df['per_pos_captured'] = (df['actual_sum']/(len(df[df['actual']==1])))*100\r\n \r\n #proportion of positives captured from total\r\n df['prop_pos_captured_from_all'] = df['per_pos_captured']/df['per_sample_covered']\r\n\r\n return df\r\n\r\n#creates a plot of cumulative 
positive gain \r\n#takes in a dataframe with all of the relevant statistics already calculated\r\ndef gain_plot(df,figsize=None,x_range=None,y_range=None,legend='on'):\r\n\r\n\t #Plot of the cumulative capture of positives as we go across the deciles\r\n if figsize:\r\n \tfig = plt.figure(figsize=figsize)\r\n plt.plot(pd.Series([0]).append(df['per_sample_covered']),\r\n pd.Series([0]).append(df['per_pos_captured'])) #pre-pending zeos to the front of the series for polotting purposes\r\n plt.plot([0,100],[0,100])\r\n plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',fontsize=20) \r\n plt.xlabel('% of Sample Covered',fontsize=15)\r\n plt.ylabel('% of True Positives Captured',fontsize=15)\r\n if x_range:\r\n \tplt.xlim(x_range[0],x_range[1])\r\n if y_range:\r\n \tplt.ylim(y_range[0],y_range[1])\r\n if legend=='on':\r\n \tplt.legend(['Predictive Targeting','Random Targeting'],fontsize=12,loc=2)\r\n\r\ndef lift_plot(df,figsize=None,x_range=None,y_range=None,legend='on'):\r\n\r\n\t#Lift Curve Plot(at whatever percent of customers cutoff, the model is targeting X times better than random)\r\n #i.e. at the XX percentile, the response rate is Y times as good as it would be if targeting at random at the XX percentile\r\n if figsize:\r\n \tfig = plt.figure(figsize=figsize)\r\n plt.plot(df['per_sample_covered'],df['prop_pos_captured_from_all'])\r\n plt.plot([df['per_sample_covered'].min(),100],[1,1])\r\n plt.title('Lift Curve',fontsize=20)\r\n plt.xlabel('% of Customers',fontsize=15)\r\n plt.ylabel('Lift',fontsize=15)\r\n if x_range:\r\n \tplt.xlim(x_range[0],x_range[1])\r\n if y_range:\r\n \tplt.ylim(y_range[0],y_range[1])\r\n if legend=='on':\r\n \tplt.legend(['Predictive Targeting','Random Targeting'],fontsize=12)\r\n\r\n#a function which takes in an array of predicted values and returns the percentile associated with each one\r\ndef percentile_gen(arr_y_pred):\r\n\treturn np.array(pd.qcut(pd.Series(arr_y_pred).rank(method='first'),100,labels=range(1,101))) #method = first is used in the case when there are a lot of 0s and overlapping of labels\r\n\r\n#a function which takes in an array of actual test values and the model predicted values and stacks them together\r\n#then sorts them and puts them into a dataframe\r\ndef data_prep(arr_y_test,arr_y_pred):\r\n\r\n\t#assigning each observation into a percentile\r\n\tpercentiles = percentile_gen(arr_y_pred)\r\n\r\n\t#print(percentiles.shape)\r\n\r\n\t#joining all the pieces together\r\n\tdata = np.hstack((arr_y_test.reshape((len(arr_y_test),1)),\r\n\t\t\t\t\t arr_y_pred.reshape((len(arr_y_pred),1)),\r\n\t\t\t\t\t percentiles.reshape((len(percentiles),1))))\r\n\t\r\n\t#converting to a data frame\r\n\tdata_df = pd.DataFrame(data)\r\n\tdata_df.columns = ['actual','prob','percentile']\r\n\tdata_df.actual = data_df.actual.astype(int)\r\n\tdata_df.prob = data_df.prob.astype('float64')\r\n\t\r\n\t#sorting by the probability\r\n\tdata_df = data_df.sort_values(by='prob',ascending=False)\r\n\r\n\t#calculating lift metrics\r\n\tdata_df = lift_calculations(data_df)\r\n\r\n\treturn data_df\r\n\r\n#a function which plots the lift curve for the model\r\ndef lift_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'):\r\n\r\n\tdata_df = data_prep(arr_y_test,arr_y_pred)\r\n\r\n\t#print(data_df.groupby('percentile').size())\r\n\r\n\t#lift curve plot\r\n\tlift_plot(data_df,figsize=figsize,x_range=x_range,y_range=y_range,legend=legend)\r\n\tplt.show()\r\n\r\n#a function which plots the gain curve for the model\r\ndef 
gain_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'):\r\n\r\n\tdata_df = data_prep(arr_y_test,arr_y_pred)\r\n\r\n\t#gain curve plot\r\n\tgain_plot(data_df,figsize=figsize,x_range=x_range,y_range=y_range,legend=legend)\r\n\tplt.show()\r\n\r\n#a function which returns two numpy arrays:\r\n#the first one is the percent of samples covered (X-value)\r\n#the second being the lift values for the correponding the sample (Y-value)\r\ndef lift_values_generator(arr_y_test,arr_y_pred):\r\n\r\n\tdata_df = data_prep(arr_y_test,arr_y_pred)\r\n\r\n\treturn data_df.per_sample_covered, data_df.prop_pos_captured_from_all\r\n\r\n#a function which plots multiple lift curves all on the same graph\r\n#the first parameter is the x axis which represents %of the sample covered\r\n#the second parameter is a list of lists, where each one presents the lift\r\n#curve for a particular model, the last parameter holds the labels for the lift\r\n#curves in the corresponding order\r\n\r\ndef plot_lift_curves(percent_sample_covered,list_of_lift_metrics,labels,figsize=None,x_range=None,y_range=None,legend='on'):\r\n\r\n\tif figsize:\r\n\t\tplt.figure(figsize=figsize)\r\n\r\n\t#plotting the various model lift curves\r\n\tfor i,lift_scores in enumerate(list_of_lift_metrics):\r\n\t\tplt.plot(percent_sample_covered,lift_scores)\r\n\t#base line plot for random guessing\r\n\tplt.plot([percent_sample_covered.min(),100],[1,1])\r\n\t\r\n\t#formats and labels\r\n\tplt.title('Lift Curves Comparison',fontsize=20)\r\n\tplt.xlabel('% of Customers',fontsize=15)\r\n\tplt.ylabel('Lift',fontsize=15)\r\n\tif x_range:\r\n\t\tplt.xlim(x_range[0],x_range[1])\r\n\tif y_range:\r\n\t\tplt.ylim(y_range[0],y_range[1])\r\n\tmodel_labels = labels + ['Random Guessing']\r\n\tif legend == 'on':\r\n\t\tplt.legend(model_labels,fontsize=12,loc='best')\r\n", "step-ids": [ 5, 7, 8, 10, 11 ] }
[ 5, 7, 8, 10, 11 ]
import main from pytest import approx def test_duration(): ins = main.convert() names = ins.multiconvert() for name in names: induration, outduration = ins.ffprobe(name[0], name[1]) assert induration == approx(outduration) induration, outduration = ins.ffprobe(name[0], name[2]) assert induration == approx(outduration) print("All files are converted successfully!") if __name__ == '__main__': test_duration()
normal
{ "blob_id": "92c247b827d2ca4dce9b631a2c09f2800aabe216", "index": 6129, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\nif __name__ == '__main__':\n test_duration()\n", "step-4": "import main\nfrom pytest import approx\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\nif __name__ == '__main__':\n test_duration()\n", "step-5": "import main\nfrom pytest import approx\n\ndef test_duration():\n\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print(\"All files are converted successfully!\")\n\nif __name__ == '__main__':\n test_duration()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import veil_component with veil_component.init_component(__name__): from .material import list_category_materials from .material import list_material_categories from .material import list_issue_materials from .material import list_issue_task_materials from .material import get_material_image_url __all__ = [ list_category_materials.__name__, list_material_categories.__name__, list_issue_materials.__name__, list_issue_task_materials.__name__, get_material_image_url.__name__, ]
normal
{ "blob_id": "acad268a228b544d60966a8767734cbf9c1237ac", "index": 9979, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith veil_component.init_component(__name__):\n from .material import list_category_materials\n from .material import list_material_categories\n from .material import list_issue_materials\n from .material import list_issue_task_materials\n from .material import get_material_image_url\n __all__ = [list_category_materials.__name__, list_material_categories.\n __name__, list_issue_materials.__name__, list_issue_task_materials.\n __name__, get_material_image_url.__name__]\n", "step-3": "import veil_component\nwith veil_component.init_component(__name__):\n from .material import list_category_materials\n from .material import list_material_categories\n from .material import list_issue_materials\n from .material import list_issue_task_materials\n from .material import get_material_image_url\n __all__ = [list_category_materials.__name__, list_material_categories.\n __name__, list_issue_materials.__name__, list_issue_task_materials.\n __name__, get_material_image_url.__name__]\n", "step-4": "import veil_component\n\nwith veil_component.init_component(__name__):\n\n from .material import list_category_materials\n from .material import list_material_categories\n from .material import list_issue_materials\n from .material import list_issue_task_materials\n from .material import get_material_image_url\n\n __all__ = [\n list_category_materials.__name__,\n list_material_categories.__name__,\n list_issue_materials.__name__,\n list_issue_task_materials.__name__,\n get_material_image_url.__name__,\n ]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python # coding: utf-8 import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' import tensorflow as tf print(tf.__version__) print(tf.keras.__version__) print(tf.__path__) import numpy as np from tqdm import tqdm, tqdm_notebook from utils import emphasis import tensorflow.keras.backend as K from tensorflow.keras.utils import Sequence import librosa import librosa.display print(tf.test.is_gpu_available()) # ## SRCNN class SubPixel1D(tf.keras.layers.Layer): def __init__(self, r=2): super(SubPixel1D, self).__init__() self.r = r def call(self, I): """One-dimensional subpixel upsampling layer Calls a tensorflow function that directly implements this functionality. We assume input has dim (batch, width, r) """ X = tf.transpose(I, [2,1,0]) # (r, w, b) X = tf.batch_to_space_nd(X, [self.r], [[0,0]]) # (1, r*w, b) X = tf.transpose(X, [2,1,0]) return X noisy = tf.keras.layers.Input(shape=(None, 1)) x_input = noisy x = x_input # B = 8 # n_filters = [128, 256, 512, 512, 512, 512, 512, 512] # kernel_sizes = [65, 33, 17, 9, 9, 9, 9, 9] B = 4 n_filters = [128, 256, 512, 512] kernel_sizes = [65, 33, 17, 9] # B = 3 # n_filters = [128, 256, 512] # kernel_sizes = [65, 33, 17] # B = 3 # n_filters = [64, 128, 256] # kernel_sizes = [65, 33, 17] # Downsampling Layers encoder_features = [] for k, n_filter, kernel_size in zip(range(B), n_filters, kernel_sizes): x = tf.keras.layers.Conv1D(filters = n_filter, kernel_size = kernel_size, strides = 2, padding = 'same', kernel_initializer = 'Orthogonal')(x) # x = tf.keras.layers.PReLU()(x) x = tf.keras.layers.LeakyReLU(0.2)(x) encoder_features.append(x) # Bottleneck Layer x = tf.keras.layers.Conv1D(filters = 512, kernel_size = 9, strides = 2, padding = 'same', kernel_initializer = 'Orthogonal')(x) x = tf.keras.layers.Dropout(rate=0.5)(x) # x = tf.keras.layers.PReLU()(x) x = tf.keras.layers.LeakyReLU(0.2)(x) # Upsampling Layer for k, n_filter, kernel_size, enc in reversed(list(zip(range(B), n_filters, kernel_sizes, encoder_features))): x = tf.keras.layers.Conv1D(filters = 2 * n_filter, kernel_size = kernel_size, strides = 1, padding = 'same', kernel_initializer = 'Orthogonal')(x) x = tf.keras.layers.Dropout(rate=0.5)(x) # x = tf.keras.layers.PReLU()(x) x = tf.keras.layers.ReLU()(x) x = SubPixel1D()(x) x = tf.keras.layers.Concatenate(axis=2)([x, enc]) # Final Conv Layer x = tf.keras.layers.Conv1D(filters = 2, kernel_size = 9, strides = 1, padding = 'same')(x) x = SubPixel1D()(x) x_final = tf.keras.layers.Add()([x, x_input]) G = tf.keras.models.Model(inputs = [noisy], outputs = [x_final]) # Train Model # Initialize Model optim = tf.keras.optimizers.Adam(lr=1e-4) def G_loss(true, fake): return K.mean(K.sqrt(K.mean((fake - true) ** 2 + 1e-6, axis=[1, 2])), axis=0) def G_LSD_loss(y_clean, y_noisy): y_clean = tf.squeeze(y_clean) y_noisy = tf.squeeze(y_noisy) D_clean = tf.signal.stft(signals = y_clean, frame_length = 2048, frame_step = 1024) D_noisy = tf.signal.stft(signals = y_noisy, frame_length = 2048, frame_step = 1024) D_clean_log = K.log(K.abs(D_clean) ** 2 + 1e-6) D_noisy_log = K.log(K.abs(D_noisy) ** 2 + 1e-6) return K.mean(K.sqrt(K.mean((D_clean_log - D_noisy_log) ** 2, axis = [2])), axis = [0, 1]) G.compile(loss = G_LSD_loss, optimizer = optim) G.summary() # tf.keras.utils.plot_model(G, to_file='./generator.png', show_shapes=True) # Training class data_sequence(Sequence): def __init__(self, data_path, batch_size = 64): self.filenames = [os.path.join(data_path, filename) for filename in os.listdir(data_path)] self.batch_size = batch_size def __len__(self): 
return int(np.ceil(len(self.filenames) / float(self.batch_size))) def on_epoch_end(self): np.random.shuffle(self.filenames) def __getitem__(self, idx): noisy_batch = [] clean_batch = [] for i in range(idx * self.batch_size, min(len(self.filenames), (idx + 1) * self.batch_size)): pair = np.load(self.filenames[i]) # pair = emphasis(pair[np.newaxis, :, :], emph_coeff=0.95).reshape(2, -1) clean = pair[0].reshape(-1, 1).astype('float32') noisy = pair[1].reshape(-1, 1).astype('float32') noisy_batch.append(noisy) clean_batch.append(clean) return np.array(noisy_batch), np.array(clean_batch) train_data_path = '../dataset/serialized_train_data' val_data_path = '../dataset/serialized_val_data' callbacks = [ tf.keras.callbacks.ModelCheckpoint(filepath='./model/weights_LSD.hdf5', verbose=1, save_best_only=True, save_weights_only=True), tf.keras.callbacks.TensorBoard(log_dir='./logs/LSD', update_freq='batch'), # tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-8), ] G.fit_generator(generator = data_sequence(train_data_path, 64), validation_data = data_sequence(val_data_path, 2), steps_per_epoch = 3325 // 64, verbose = 1, epochs = 400, callbacks = callbacks, max_queue_size = 10, use_multiprocessing = True, workers = 6, initial_epoch = 0)
normal
{ "blob_id": "08a0ab888886184f7447465508b6494b502821ea", "index": 8903, "step-1": "#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport tensorflow as tf\nprint(tf.__version__)\nprint(tf.keras.__version__)\nprint(tf.__path__)\nimport numpy as np\n\nfrom tqdm import tqdm, tqdm_notebook\nfrom utils import emphasis\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.utils import Sequence\nimport librosa\nimport librosa.display\n\nprint(tf.test.is_gpu_available())\n\n\n# ## SRCNN\nclass SubPixel1D(tf.keras.layers.Layer):\n def __init__(self, r=2):\n super(SubPixel1D, self).__init__()\n self.r = r\n def call(self, I):\n \"\"\"One-dimensional subpixel upsampling layer\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r)\n \"\"\"\n\n X = tf.transpose(I, [2,1,0]) # (r, w, b)\n X = tf.batch_to_space_nd(X, [self.r], [[0,0]]) # (1, r*w, b)\n X = tf.transpose(X, [2,1,0])\n return X\n\nnoisy = tf.keras.layers.Input(shape=(None, 1))\nx_input = noisy\nx = x_input\n\n# B = 8\n# n_filters = [128, 256, 512, 512, 512, 512, 512, 512]\n# kernel_sizes = [65, 33, 17, 9, 9, 9, 9, 9]\n\nB = 4\nn_filters = [128, 256, 512, 512]\nkernel_sizes = [65, 33, 17, 9]\n\n# B = 3\n# n_filters = [128, 256, 512]\n# kernel_sizes = [65, 33, 17]\n\n# B = 3\n# n_filters = [64, 128, 256]\n# kernel_sizes = [65, 33, 17]\n\n\n# Downsampling Layers\nencoder_features = []\nfor k, n_filter, kernel_size in zip(range(B), n_filters, kernel_sizes):\n x = tf.keras.layers.Conv1D(filters = n_filter,\n kernel_size = kernel_size,\n strides = 2,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\n # x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.LeakyReLU(0.2)(x)\n encoder_features.append(x)\n \n# Bottleneck Layer\nx = tf.keras.layers.Conv1D(filters = 512,\n kernel_size = 9,\n strides = 2,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\nx = tf.keras.layers.Dropout(rate=0.5)(x)\n# x = tf.keras.layers.PReLU()(x)\nx = tf.keras.layers.LeakyReLU(0.2)(x)\n\n# Upsampling Layer\nfor k, n_filter, kernel_size, enc in reversed(list(zip(range(B), \n n_filters, \n kernel_sizes, \n encoder_features))):\n x = tf.keras.layers.Conv1D(filters = 2 * n_filter,\n kernel_size = kernel_size,\n strides = 1,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\n x = tf.keras.layers.Dropout(rate=0.5)(x)\n # x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.ReLU()(x)\n x = SubPixel1D()(x)\n x = tf.keras.layers.Concatenate(axis=2)([x, enc])\n\n# Final Conv Layer\nx = tf.keras.layers.Conv1D(filters = 2,\n kernel_size = 9,\n strides = 1,\n padding = 'same')(x)\nx = SubPixel1D()(x)\nx_final = tf.keras.layers.Add()([x, x_input]) \nG = tf.keras.models.Model(inputs = [noisy], outputs = [x_final]) \n\n\n# Train Model\n# Initialize Model\n\noptim = tf.keras.optimizers.Adam(lr=1e-4)\ndef G_loss(true, fake):\n return K.mean(K.sqrt(K.mean((fake - true) ** 2 + 1e-6, axis=[1, 2])), axis=0)\n\ndef G_LSD_loss(y_clean, y_noisy):\n y_clean = tf.squeeze(y_clean)\n y_noisy = tf.squeeze(y_noisy)\n \n D_clean = tf.signal.stft(signals = y_clean,\n frame_length = 2048,\n frame_step = 1024)\n D_noisy = tf.signal.stft(signals = y_noisy,\n frame_length = 2048,\n frame_step = 1024)\n \n D_clean_log = K.log(K.abs(D_clean) ** 2 + 1e-6)\n D_noisy_log = K.log(K.abs(D_noisy) ** 2 + 1e-6)\n\n\treturn K.mean(K.sqrt(K.mean((D_clean_log - D_noisy_log) ** 2, axis = [2])), axis = [0, 1])\n\nG.compile(loss = G_LSD_loss,\n optimizer = optim)\nG.summary()\n# 
tf.keras.utils.plot_model(G, to_file='./generator.png', show_shapes=True)\n\n\n# Training\n\nclass data_sequence(Sequence):\n def __init__(self, data_path, batch_size = 64):\n self.filenames = [os.path.join(data_path, filename) for filename in os.listdir(data_path)]\n self.batch_size = batch_size\n \n def __len__(self):\n return int(np.ceil(len(self.filenames) / float(self.batch_size)))\n \n def on_epoch_end(self):\n np.random.shuffle(self.filenames)\n\n def __getitem__(self, idx):\n noisy_batch = []\n clean_batch = []\n \n for i in range(idx * self.batch_size, min(len(self.filenames), (idx + 1) * self.batch_size)):\n pair = np.load(self.filenames[i])\n # pair = emphasis(pair[np.newaxis, :, :], emph_coeff=0.95).reshape(2, -1)\n clean = pair[0].reshape(-1, 1).astype('float32')\n noisy = pair[1].reshape(-1, 1).astype('float32')\n \n noisy_batch.append(noisy)\n clean_batch.append(clean)\n\n return np.array(noisy_batch), np.array(clean_batch)\n \n \ntrain_data_path = '../dataset/serialized_train_data'\nval_data_path = '../dataset/serialized_val_data' \n \ncallbacks = [\n tf.keras.callbacks.ModelCheckpoint(filepath='./model/weights_LSD.hdf5', \n verbose=1,\n save_best_only=True,\n save_weights_only=True),\n tf.keras.callbacks.TensorBoard(log_dir='./logs/LSD', update_freq='batch'),\n # tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-8),\n ]\n \nG.fit_generator(generator = data_sequence(train_data_path, 64),\n validation_data = data_sequence(val_data_path, 2),\n steps_per_epoch = 3325 // 64, \n verbose = 1,\n epochs = 400,\n callbacks = callbacks,\n max_queue_size = 10,\n use_multiprocessing = True,\n workers = 6,\n initial_epoch = 0)\n\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# #writing a file # fout = open('Session14/output.txt', 'w') # line1 = "How many roads must a man walk down\n" # fout.write(line1) # line2 = "Before you call him a man?\n" # fout.write(line2) # #when you are done writing, you should close the file. # fout.close() # #if you dont close the file, it gets closed for you when the program dies #exercise 1 # def sed(pattern, replace, source, dest): # with open(source, 'r') as f_r: # with open(dest, 'w') as f_w: # for line in f_r: # new_line = line.replace(pattern, replace) # f_w.write(new_line) # pattern = " man " # replace = " woman " # source = "Session14/output.txt" # dest = "Session14/output2.txt" # sed(pattern, replace, source, dest) import os cwd = os.getcwd() #cwd stands for "current working directory" # print(cwd) #os.path provides other functions for working with filenames and paths # os.path.abspath('output.txt') # os.path.exists('output.txt') # os.path.isdir('output.txt') # os.path.isdir('/exercises') # os.path.isfile('output.txt') # os.listdir(cwd) def walk(dirname): """Prints the names of all files in dirname and its subdirectories. dirname: string name of directory """ for name in os.listdir(dirname): path = os.path.join(dirname, name) if os.path.isfile(path): print(path) else: walk(path) #os.path.join takes a directory and a file name and joins them inot a complete path def walk2(dirname): """Prints the names of all files in dirname and its subdirectories. dirname: string name of directory """ for root, dirs, files in os.walk(dirname): for filename in files: print(os.path.join(root, filename))
normal
{ "blob_id": "de1262da699a18266ad8673597391f625783a44d", "index": 5721, "step-1": "<mask token>\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n", "step-2": "<mask token>\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n", "step-3": "<mask token>\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n", "step-4": "import os\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n", "step-5": "# #writing a file\n# fout = open('Session14/output.txt', 'w')\n# line1 = \"How many roads must a man walk down\\n\"\n# fout.write(line1)\n# line2 = \"Before you call him a man?\\n\"\n# fout.write(line2)\n# #when you are done writing, you should close the file.\n# fout.close()\n# #if you dont close the file, it gets closed for you when the program dies\n\n#exercise 1\n# def sed(pattern, replace, source, dest):\n# with open(source, 'r') as f_r:\n# with open(dest, 'w') as f_w:\n# for line in f_r:\n# new_line = line.replace(pattern, replace)\n# f_w.write(new_line)\n\n# pattern = \" man \"\n# replace = \" woman \"\n# source = \"Session14/output.txt\"\n# dest = \"Session14/output2.txt\"\n# sed(pattern, replace, source, dest)\n\nimport os\ncwd = os.getcwd()\n#cwd stands for \"current working directory\"\n# print(cwd)\n\n#os.path provides other functions for working with filenames and paths\n# os.path.abspath('output.txt')\n# os.path.exists('output.txt')\n# os.path.isdir('output.txt')\n# os.path.isdir('/exercises')\n# os.path.isfile('output.txt')\n# os.listdir(cwd)\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\" \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n#os.path.join takes a directory and a file name and joins them inot a complete path\n\ndef 
walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Adjust figure when using plt.gcf
import matplotlib.pyplot as plt

fig = plt.gcf()         # grab the current figure, as the comment above assumes
ax = fig.gca()          # get the figure's current axes
ax.set_aspect('equal')  # force equal scaling on the x and y axes
normal
{ "blob_id": "24246427e2fde47bbc9d068605301f54c6ecbae5", "index": 1797, "step-1": "<mask token>\n", "step-2": "<mask token>\nax.set_aspect('equal')\n", "step-3": "ax = fig.gca()\nax.set_aspect('equal')\n", "step-4": "# Adjust figure when using plt.gcf\nax = fig.gca()\nax.set_aspect('equal')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import logging import os import time import urllib from collections import namedtuple from statistics import mean from urllib.request import urlopen import bs4 import regex as re from tika import parser from scipy.stats import ks_2samp import config from TFU.trueformathtml import TrueFormatUpmarkerHTML from TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX from helpers.str_tools import remove_ugly_chars class PaperReader: """ multimedial extractor. it reads text from papers in pdfs, urls, html and other things. Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page, page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the texts of different pages, where sentences continue. detecting text by comparing to the letter distribution of normal prose to parts of the text extracted. """ def __init__(self, _threshold=0.001, _length_limit=20000): with open(config.wordlist, 'r') as f: self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4] self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX() self.tfu_html = TrueFormatUpmarkerHTML() self.length_limit = _length_limit self.threshold = _threshold self.normal_data = list( 'used are variants of the predicate calculus. He even says, Lately ' 'those who think they ought to be so regarded seem to be winning. ' 'Under these circumstances, it does seem odd for McDermott to devote ' 'much space to complaining about the logical basis of a book whose ' 'very title proclaims it is about logical foundations. In any ' 'case, given such a title, it wouldnt seem necessary that readers ' 'should be warned that the foundations being explored are not ' 'In competition with this diversity is the idea of a unified model ' 'of inference. The desire for such a model is strong among those ' 'who study declarative representations, and Genesereth and Nilsson ' 'are no exception. As are most of their colleagues, they are drawn ' 'to the model of inference as the derivation of conclusions that ' 'are entailed by a set of beliefs. They wander from this idea in a ' 'few places but not for long. It is not hard to see why: Deduction ' 'is one of the fews kinds of inference for which we have an ' 'interesting general theory. 
'.lower() ) def just_extract_text_from_html(self, adress): logging.info(f"extracting text from {adress}") try: with urlopen(adress).read().decode('utf-8') as fdoc: soup = bs4.BeautifulSoup(fdoc, parent="lxml") return self.get_only_real_words(soup.get_text(), self.wordlist) except ValueError: with open(adress, "r") as fdoc: soup = bs4.BeautifulSoup(fdoc, features='lxml') return self.get_only_real_words(soup.get_text(), self.wordlist) def parse_file_format(self, adress): if adress.endswith('pdf'): paths = self.pdfpath2htmlpaths(adress) if config.parse_pdf2htmlEX: os.system(f"pdf2htmlEX " f"--optimize-text 1 " f"--fit-width {config.reader_width} " f"\"{adress}\" \"{paths.html_before_indexing}\"") tfu = self.tfu_pdf elif adress.endswith('html'): tfu = self.tfu_html paths = self.htmlpath2htmlpaths(adress) logging.warning("trying with html...") else: logging.error(f"File '{adress}' could not be processed") return None tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing) tfu.save_doc_json(paths.json_path) os.system(f"cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"") self.text = " ".join(list(tfu.indexed_words.values())) # needed for topic modelling with open(paths.txt_path, "w") as f: f.write(self.text) logging.debug(paths) self.paths = paths time.sleep(2) logging.info(f"extracted text: {self.text[100:]}") return None def load_url(self, adress): response = urllib.request.urlopen(adress) data = response.read() # a `bytes` object self.text = parser.from_buffer(data) def analyse(self): """ Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc. :return str: prose text """ logging.info("transferring text to CorpusCook...") paragraphs = self.text.split('\n\n') print("mean length of splitted lines", (mean([len(p) for p in paragraphs]))) # If TIKA resolved '\n' if (mean([len(p) for p in paragraphs])) > 80: paragraphs = [re.sub(r"- *\n", '', p) for p in paragraphs] paragraphs = [p.replace('\n', " ") for p in paragraphs] paragraphs = [p.replace(';', " ") for p in paragraphs] joiner = " " else: # If TIKA did not joiner = " " processed_text = joiner.join([p for p in paragraphs if p and ks_2samp(self.normal_data, list(p)).pvalue > self.threshold ] ) return processed_text.strip()[:self.length_limit] DocPaths = namedtuple("DocPaths", ["html_before_indexing", "html_after_indexing", "apache_path", "json_path", "txt_path"]) def pdfpath2htmlpaths(self, adress): # file_extension = os.path.splitext(adress)[1] keep it, but unused # path = os.path.dirname(adress) filename = os.path.basename(adress) html_before_indexing = config.appcorpuscook_docs_document_dir + filename + ".html" filename = remove_ugly_chars(filename) html_after_indexing = config.appcorpuscook_docs_document_dir + filename + ".pdf2htmlEX.html" json_path = config.appcorpuscook_docs_json_dir + filename + ".json" txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt" apache_path = config.apache_dir_document + filename + ".html" return self.DocPaths( html_before_indexing, html_after_indexing, apache_path, json_path, txt_path) def get_only_real_words(self, text, wordlist): return text #" ".join([word for word in text.split() if word in wordlist]) def htmlpath2htmlpaths(self, adress): filename = os.path.basename(adress) html_before_indexing = config.appcorpuscook_diff_document_dir + filename filename = remove_ugly_chars(filename) html_after_indexing = config.appcorpuscook_diff_html_dir + filename + ".pdf2htmlEX.html" json_path = 
config.appcorpuscook_diff_json_dir + filename + ".json" txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt" apache_path = config.apache_dir_document + filename + ".html" return self.DocPaths( html_before_indexing, html_after_indexing, apache_path, json_path, txt_path)
normal
{ "blob_id": "4d2cb3e0bdd331a1de7f07eb0109f02c9cf832a8", "index": 7441, "step-1": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in 
paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n <mask token>\n <mask token>\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n", "step-2": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = 
config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n", "step-3": "<mask token>\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = 
config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n", "step-4": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = 
config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n", "step-5": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\n\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately '\n 'those who think they ought to be so regarded seem to be winning. '\n 'Under these circumstances, it does seem odd for McDermott to devote '\n 'much space to complaining about the logical basis of a book whose '\n 'very title proclaims it is about logical foundations. In any '\n 'case, given such a title, it wouldnt seem necessary that readers '\n 'should be warned that the foundations being explored are not '\n 'In competition with this diversity is the idea of a unified model '\n 'of inference. The desire for such a model is strong among those '\n 'who study declarative representations, and Genesereth and Nilsson '\n 'are no exception. As are most of their colleagues, they are drawn '\n 'to the model of inference as the derivation of conclusions that '\n 'are entailed by a set of beliefs. They wander from this idea in a '\n 'few places but not for long. It is not hard to see why: Deduction '\n 'is one of the fews kinds of inference for which we have an '\n 'interesting general theory. 
'.lower()\n )\n\n def just_extract_text_from_html(self, adress):\n logging.info(f\"extracting text from {adress}\")\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent=\"lxml\")\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, \"r\") as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n\n if config.parse_pdf2htmlEX:\n os.system(f\"pdf2htmlEX \"\n f\"--optimize-text 1 \"\n f\"--fit-width {config.reader_width} \"\n f\"\\\"{adress}\\\" \\\"{paths.html_before_indexing}\\\"\")\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning(\"trying with html...\")\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n\n tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f\"cp \\\"{paths.html_after_indexing}\\\" \\\"{paths.apache_path}\\\"\")\n self.text = \" \".join(list(tfu.indexed_words.values()))\n\n\n # needed for topic modelling\n with open(paths.txt_path, \"w\") as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n\n logging.info(f\"extracted text: {self.text[100:]}\")\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read() # a `bytes` object\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]\n\n DocPaths = namedtuple(\"DocPaths\", [\"html_before_indexing\",\n \"html_after_indexing\",\n \"apache_path\",\n \"json_path\",\n \"txt_path\"])\n\n def pdfpath2htmlpaths(self, adress):\n # file_extension = os.path.splitext(adress)[1] keep it, but unused\n # path = os.path.dirname(adress)\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_docs_document_dir + filename + \".html\"\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_docs_document_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_docs_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text #\" \".join([word for word in text.split() if word in wordlist])\n\n def 
htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_diff_document_dir + filename\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_diff_html_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_diff_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n", "step-ids": [ 8, 10, 11, 12, 13 ] }
[ 8, 10, 11, 12, 13 ]
# Example solution for HW 5 # %% # Import the modules we will use import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # %% # ** MODIFY ** # Set the file name and path to where you have stored the data filename = 'streamflow_week5.txt' #modified filename filepath = os.path.join('../data', filename) #modified path to look one directory up print(os.getcwd()) print(filepath) #filepath = '../Assignments/Solutions/data/streamflow_week5.txt' # %% #Read the data into a pandas dataframe data=pd.read_table(filepath, sep = '\t', skiprows=30, names=['agency_cd', 'site_no', 'datetime', 'flow', 'code'] ) # Expand the dates to year month day data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True) data['year'] = data['year'].astype(int) data['month'] = data['month'].astype(int) data['day'] = data['day'].astype(int) # %% # Sorry no more helpers past here this week, you are on your own now :) # Hints - you will need the functions: describe, info, groupby, sort, head and tail. # %% Start of Mekha's code # 1 and 2 week forecast # Look at most recent 2 weeks of data ending 9/26 print(data.tail(14)) # Calculate avg of last two week's flow print(data.tail(14).describe()) # Calculate avg of last week's flow print(data.tail(7).describe()) # Look at stats for 2019 because from my previous analysis, I know it is a smiliarly dry year data_2019 = data[data['year']==2019] print(data_2019['flow'].describe()) # Look at stats for 2019 by month print(data_2019.groupby(['month'])[['flow']].describe()) # %% 1. Provide a summary of the data frames properties. # What are the column names? # What is its index? # What data types do each of the columns have? print(data.info()) # %% 2.Provide a summary of the flow column including the min, mean, max, standard # deviation and quartiles. print(data['flow'].describe()) # %% 3.Provide the same information but on a monthly basis. (Note: you should be # able to do this with one or two lines of code) print(data.groupby(['month'])[['flow']].describe()) # %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period # of record. Include the date, month and flow values in your summary. # 5 highest print(data.sort_values(by="flow",ascending=True).tail()) # 5 lowest print(data.sort_values(by="flow",ascending=True).head()) # %% 5.Find the highest and lowest flow values for every month of the year (i.e. you # will find 12 maxes and 12 mins) and report back what year these occurred in. # highest value for each month for i in range(1,13): month_data = data[data['month']==i] print(month_data.nlargest(1,['flow'])) # lowest value for each month for i in range(1,13): month_data = data[data['month']==i] print(month_data.nsmallest(1,['flow'])) # %% 6.Provide a list of historical dates with flows that are within 10% of your week 1 # forecast value. If there are none than increase the %10 window until you have at # least one other value and report the date and the new window you used forecast = 58.4 data_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))] pd.set_option('display.max_rows', None) print(data_10percent['datetime']) # %%
normal
{ "blob_id": "5024db0538f0022b84c203882df9c35979ba978a", "index": 4571, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(os.getcwd())\nprint(filepath)\n<mask token>\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\n<mask token>\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\n<mask token>\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n", "step-3": "<mask token>\nfilename = 'streamflow_week5.txt'\nfilepath = os.path.join('../data', filename)\nprint(os.getcwd())\nprint(filepath)\ndata = pd.read_table(filepath, sep='\\t', skiprows=30, names=['agency_cd',\n 'site_no', 'datetime', 'flow', 'code'])\ndata[['year', 'month', 'day']] = data['datetime'].str.split('-', expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\ndata_2019 = data[data['year'] == 2019]\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= 0.9 * forecast) & (data['flow'] <= \n 1.1 * forecast)]\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n", "step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfilename = 'streamflow_week5.txt'\nfilepath = os.path.join('../data', filename)\nprint(os.getcwd())\nprint(filepath)\ndata = pd.read_table(filepath, sep='\\t', skiprows=30, names=['agency_cd',\n 'site_no', 'datetime', 'flow', 'code'])\ndata[['year', 'month', 'day']] = data['datetime'].str.split('-', expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\ndata_2019 = data[data['year'] == 2019]\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= 0.9 * forecast) & (data['flow'] <= \n 1.1 * forecast)]\npd.set_option('display.max_rows', 
None)\nprint(data_10percent['datetime'])\n", "step-5": "# Example solution for HW 5\n\n# %%\n# Import the modules we will use\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %%\n# ** MODIFY **\n# Set the file name and path to where you have stored the data\nfilename = 'streamflow_week5.txt' #modified filename\nfilepath = os.path.join('../data', filename) #modified path to look one directory up\nprint(os.getcwd())\nprint(filepath)\n\n#filepath = '../Assignments/Solutions/data/streamflow_week5.txt'\n\n# %%\n#Read the data into a pandas dataframe\ndata=pd.read_table(filepath, sep = '\\t', skiprows=30,\n names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']\n )\n\n# Expand the dates to year month day\ndata[[\"year\", \"month\", \"day\"]] =data[\"datetime\"].str.split(\"-\", expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\n\n# %%\n# Sorry no more helpers past here this week, you are on your own now :) \n# Hints - you will need the functions: describe, info, groupby, sort, head and tail.\n\n# %% Start of Mekha's code\n\n# 1 and 2 week forecast\n\n# Look at most recent 2 weeks of data ending 9/26\nprint(data.tail(14))\n\n# Calculate avg of last two week's flow\nprint(data.tail(14).describe())\n\n# Calculate avg of last week's flow\nprint(data.tail(7).describe())\n\n# Look at stats for 2019 because from my previous analysis, I know it is a smiliarly dry year\ndata_2019 = data[data['year']==2019]\nprint(data_2019['flow'].describe())\n\n# Look at stats for 2019 by month\nprint(data_2019.groupby(['month'])[['flow']].describe())\n\n# %% 1. Provide a summary of the data frames properties.\n# What are the column names?\n# What is its index?\n# What data types do each of the columns have?\nprint(data.info())\n\n# %% 2.Provide a summary of the flow column including the min, mean, max, standard \n# deviation and quartiles.\nprint(data['flow'].describe())\n\n# %% 3.Provide the same information but on a monthly basis. (Note: you should be \n# able to do this with one or two lines of code)\nprint(data.groupby(['month'])[['flow']].describe())\n\n# %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period \n# of record. Include the date, month and flow values in your summary.\n\n# 5 highest\nprint(data.sort_values(by=\"flow\",ascending=True).tail())\n\n# 5 lowest\nprint(data.sort_values(by=\"flow\",ascending=True).head())\n\n\n# %% 5.Find the highest and lowest flow values for every month of the year (i.e. you \n# will find 12 maxes and 12 mins) and report back what year these occurred in.\n\n# highest value for each month\nfor i in range(1,13):\n month_data = data[data['month']==i]\n print(month_data.nlargest(1,['flow']))\n\n# lowest value for each month\nfor i in range(1,13):\n month_data = data[data['month']==i]\n print(month_data.nsmallest(1,['flow']))\n\n# %% 6.Provide a list of historical dates with flows that are within 10% of your week 1 \n# forecast value. If there are none than increase the %10 window until you have at \n# least one other value and report the date and the new window you used\n\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))]\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n\n# %%\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import random #importing the random library from python
answers = ["It is certain", "Without a doubt", "Yes, definitely",
           "You may rely on it", "As I see it, yes", "Most likely",
           "Outlook good", "Yes", "Signs point to yes", "Reply hazy, try again",
           "Ask again later", "Better not tell you now", "Cannot predict now",
           "Concentrate and ask again", "Don't count on it", "My reply is no",
           "My sources say no", "Outlook not so good", "Very doubtful"] #here, we declare a list of strings.
ans = '!' #we give ans a value so that the while loop will execute.
while ans: #this keeps looping as long as ans is not blank; an empty string is treated as False when checked
    ans = input("Ask the magic 8 ball a question. (Press enter to leave): \n")
    #we store the input so the user can exit the program by entering nothing for ans
    if not ans: #blank input means the user wants to leave, so stop before printing another answer
        break
    print(random.choice(answers)) #the random library lets us draw a random string from the list, which we then print
normal
{ "blob_id": "b5e9af166f3b55e44d9273077e5acd05b1fd68fa", "index": 2335, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n", "step-3": "<mask token>\nanswers = ['It is certain', 'Without a doubt', 'Yes, definitely',\n 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',\n 'Yes', 'Signs point to yes', 'Reply hazy, try again', 'Ask again later',\n 'Better not tell you now', 'Cannot predict now',\n 'Concentrate and ask again', \"Don't count on it\", 'My reply is no',\n 'My sources say no', 'Outlook not so good', 'Very doubtful']\nans = '!'\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n", "step-4": "import random\nanswers = ['It is certain', 'Without a doubt', 'Yes, definitely',\n 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',\n 'Yes', 'Signs point to yes', 'Reply hazy, try again', 'Ask again later',\n 'Better not tell you now', 'Cannot predict now',\n 'Concentrate and ask again', \"Don't count on it\", 'My reply is no',\n 'My sources say no', 'Outlook not so good', 'Very doubtful']\nans = '!'\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n", "step-5": "import random #importing the random library from python\nanswers = [\"It is certain\", \"Without a doubt\", \"Yes, definitely\",\n \"You may rely on it\", \"As I see it, yes\", \"Most likely\",\n \"Outlook good\", \"Yes\", \"Signs point to yes\", \"Reply hazy, try again\",\n \"Ask again later\", \"Better not tell you now\", \"Cannot predict now\",\n \"Concentrate and ask again\", \"Don't count on it\", \"My reply is no\",\n \"My sources say no\", \"Outlook not so good\", \"Very doubtful\"] #here, we declare a list of strings. \nans = '!' #we give ans a value so that the while loop will execute. \nwhile ans: #This will keep on looping as long as ans is not blank. If a variable stores nothing, it returns false when checked\n ans = input(\"Ask the magic 8 ball a question. (Press enter to leave): \\n\") \n #The reason we store the input is so the user can exit the program by passing in nothing for ans\n print(random.choice(answers)) #the random library lets us draw a random string from a list. We then print it\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# ---------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License # ---------------------------------------------------------------------- """Contains the Plugin object""" import itertools import os import sys import textwrap from collections import OrderedDict import six import CommonEnvironment from CommonEnvironment.CallOnExit import CallOnExit from CommonEnvironment import StringHelpers from CommonEnvironment import Interface # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- sys.path.insert(0, os.path.join(_script_dir, "..")) with CallOnExit(lambda: sys.path.pop(0)): from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase # ---------------------------------------------------------------------- @Interface.staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Properties Name = Interface.DerivedProperty("SharedLibraryTests") Description = Interface.DerivedProperty( "Generates code used when testing the Shared Library import/export layer", ) # ---------------------------------------------------------------------- # | Methods @staticmethod @Interface.override def Generate( open_file_func, global_custom_structs, global_custom_enums, data, output_dir, status_stream, ): result_code = 0 status_stream.write("Preprocessing data...") with status_stream.DoneManager(): type_info_data = [] for items in data: type_info_data.append([TypeInfoData(item, global_custom_structs, global_custom_enums) for item in items]) status_stream.write("Generating Common Files...") with status_stream.DoneManager() as this_dm: this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream) if this_dm.result != 0: return this_dm.result for desc, func in [("Generating .h files...", _GenerateHeaderFile)]: status_stream.write(desc) with status_stream.DoneManager( suffix="\n", ) as dm: for index, (items, items_type_info_data) in enumerate( zip(data, type_info_data), ): dm.stream.write( "Processing '{}' ({} of {})...".format( items[0].name, index + 1, len(data), ), ) with dm.stream.DoneManager() as this_dm: this_dm.result = func( open_file_func, output_dir, items, items_type_info_data, this_dm.stream, ) if dm.result < 0: return dm.result result_code = result_code or dm.result return result_code # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- def _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream): with open_file_func( os.path.join(output_dir, "SharedLibraryTests_{}.h".format(items[0].name)), "w", ) as f: f.write( textwrap.dedent( """\ /* ---------------------------------------------------------------------- */ /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/ /* Licensed under the MIT License */ /* ---------------------------------------------------------------------- */ #pragma once #include "SharedLibrary_{name}.h" #include "Traits.h" #include "Featurizers/Structs.h" #include "SharedLibraryTests_Common.hpp" #if (defined _MSC_VER) # pragma warning(push) // I don't know why MSVC thinks that there is unreachable // code in these methods during release builds. # pragma warning(disable: 4702) // Unreachable code # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used #endif """, ).format( name=items[0].name, ), ) for item, type_info_data in zip(items, all_type_info_data): template = getattr(item, "template", None) if template: suffix = "_{}_".format(template) type_desc = " <{}>".format(template) cpp_template_suffix = "<{}>".format( type_info_data.InputTypeInfo.CppType, ) else: suffix = "_" type_desc = "" cpp_template_suffix = "" if type_info_data.ConfigurationParamTypeInfos: constructor_template_params = ", typename... ConstructorArgTs" constructor_params = ",\n ConstructorArgTs &&... constructor_args" constructor_args = "std::forward<ConstructorArgTs>(constructor_args)..., " else: constructor_template_params = "" constructor_params = "" constructor_args = "" fit_prefix_statements = "" transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs() if isinstance(transform_input_args, tuple): transform_input_args, fit_prefix_statements = transform_input_args # Special processing for vector<bool> if type_info_data.InputTypeInfo.TypeName == "bool": # vector<bool> isn't actually a bool, so we can't take a direct reference to it for_loop = "for(bool input : inference_input)" else: for_loop = "for(auto const & input : inference_input)" if type_info_data.OutputTypeInfo.TypeName == "bool": # vector<bool> doesn't support emplace_back on some platforms invocation_template = "results.push_back({});" else: invocation_template = "results.emplace_back({});" # Get the output statement information if item.has_dynamic_output: output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo( invocation_template=invocation_template, result_name="results", ) else: output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo( invocation_template=invocation_template, result_name="results", ) # Write the training statements f.write( textwrap.dedent( """\ /* ---------------------------------------------------------------------- */ /* | {name}{type_desc} */ template <typename VectorInputT{constructor_template_params}> void {name}{suffix}Test( std::vector<VectorInputT> const &training_input, std::vector<VectorInputT> const &inference_input, std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params} ) {{ ErrorInfoHandle * pErrorInfo(nullptr); // Create the estimator {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr); REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo)); REQUIRE(pEstimatorHandle != nullptr); REQUIRE(pErrorInfo == nullptr); // Train if(training_input.empty() == false) {{ typename std::vector<VectorInputT>::const_iterator iter(training_input.begin()); while(true) {{ TrainingState trainingState(0); REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); if(trainingState != Training) break; FitResult result(0); auto const & input(*iter); 
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); if(result == ResetAndContinue) {{ iter = training_input.begin(); continue; }} ++iter; if(iter == training_input.end()) {{ REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); iter = training_input.begin(); }} }} }} {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo); REQUIRE(pErrorInfo == nullptr); // Once here, training should be complete {{ bool is_complete(false); REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); REQUIRE(is_complete); }} // Create the Transformer {name}{suffix}TransformerHandle * pTransformerHandle(nullptr); REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo)); REQUIRE(pTransformerHandle != nullptr); REQUIRE(pErrorInfo == nullptr); // Destroy the estimator REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); """, ).format( name=item.name, type_desc=type_desc, suffix=suffix, vector_result_type=output_statement_info.VectorResultType, constructor_template_params=constructor_template_params, constructor_params=constructor_params, constructor_args=constructor_args, fit_input_args=transform_input_args, fit_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format( StringHelpers.LeftJustify( fit_prefix_statements.rstrip(), 12, ), ), ), ) # Write the inferencing statements inline_destroy_statement = "// No inline destroy statement" trailing_destroy_statement = "// No trailing destroy statement" if output_statement_info.DestroyArgs: if output_statement_info.DestroyInline: inline_destroy_statement = textwrap.dedent( """\ // Destroy the contents REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); """, ).format( name=item.name, suffix=suffix, args=output_statement_info.DestroyArgs, ) else: trailing_destroy_statement = textwrap.dedent( """\ for(auto & {var_name}: results) {{ REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); }} """, ).format( name=item.name, suffix=suffix, args=output_statement_info.DestroyArgs, var_name=output_statement_info.DestroyVarName or "result", ) if item.has_dynamic_output: f.write( StringHelpers.LeftJustify( textwrap.dedent( """\ // Inference std::vector<{vector_result_type}> results; {for_loop} {{ {transform_prefix_statements}{transform_vars} REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); {transform_statement} {inline_destroy_statement} }} if(true) {{ {transform_vars} REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); {transform_statement} {inline_destroy_statement} }} """, ).format( name=item.name, suffix=suffix, vector_result_type=output_statement_info.VectorResultType, for_loop=for_loop, transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format( StringHelpers.LeftJustify( fit_prefix_statements, 4, ).rstrip(), ), transform_vars=StringHelpers.LeftJustify( "\n".join( [ "{} {};".format(var.Type, var.Name) for var in output_statement_info.TransformVars ] ), 4, ), transform_input_args=transform_input_args, transform_output_args=", ".join(["&{}".format(p.Name) for p in 
output_statement_info.TransformVars]), transform_statement=StringHelpers.LeftJustify( output_statement_info.AppendResultStatement.rstrip(), 4, ), inline_destroy_statement=StringHelpers.LeftJustify( inline_destroy_statement.rstrip(), 4, ), ), 4, skip_first_line=False, ), ) else: f.write( StringHelpers.LeftJustify( textwrap.dedent( """\ // Inference std::vector<{vector_result_type}> results; results.reserve(inference_input.size()); {for_loop} {{ {transform_prefix_statements}{transform_vars} REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); {transform_statement} {inline_destroy_statement} }} """, ).format( name=item.name, suffix=suffix, vector_result_type=output_statement_info.VectorResultType, for_loop=for_loop, transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format( StringHelpers.LeftJustify( fit_prefix_statements, 4, ).rstrip(), ), transform_vars=StringHelpers.LeftJustify( "\n".join( [ "{} {};".format(var.Type, var.Name) for var in output_statement_info.TransformVars ] ), 4, ), transform_input_args=transform_input_args, transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]), transform_statement=StringHelpers.LeftJustify( output_statement_info.AppendResultStatement.rstrip(), 4, ), inline_destroy_statement=StringHelpers.LeftJustify( inline_destroy_statement.rstrip(), 4, ), ), 4, skip_first_line=False, ), ) f.write( textwrap.dedent( """\ REQUIRE(verify_func(results)); {trailing_destroy_statement} // Destroy the transformer REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo)); REQUIRE(pErrorInfo == nullptr); }} """, ).format( name=item.name, suffix=suffix, trailing_destroy_statement=StringHelpers.LeftJustify( trailing_destroy_statement.rstrip(), 4, ), ), ) f.write( textwrap.dedent( """\ #if (defined _MSC_VER) # pragma warning(pop) #endif """, ), ) # ---------------------------------------------------------------------- def _GenerateCommonFiles(open_file_func, output_dir, output_stream): with open_file_func( os.path.join(output_dir, "SharedLibraryTests_Common.hpp"), "w", ) as f: f.write( textwrap.dedent( """\ /* ---------------------------------------------------------------------- */ /* Copyright (c) Microsoft Corporation. All rights reserved. */ /* Licensed under the MIT License */ /* ---------------------------------------------------------------------- */ #pragma once #include "SharedLibrary_Common.hpp" #if (defined _MSC_VER) # pragma warning(push) // I don't know why MSVC thinks that there is unreachable // code in these methods during release builds. 
# pragma warning(disable: 4702) // Unreachable code # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used #endif """, ), ) for type_info_class in TypeInfoData.EnumTypeInfoClasses(): type_info_class.CreateHelperMethods(f) f.write( textwrap.dedent( """\ #if (defined _MSC_VER) # pragma warning(pop) #endif """, ), ) return 0 # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- class TypeInfoData(object): # ---------------------------------------------------------------------- # | # | Public Methods # | # ---------------------------------------------------------------------- def __init__(self, item, global_custom_structs, global_custom_enums): # Create the custom enums custom_enums = OrderedDict() for custom_enum in itertools.chain(global_custom_enums, getattr(item, "custom_enums", [])): if isinstance(custom_enum.underlying_type, six.string_types): type_info = self._CreateTypeInfo(custom_enum.underlying_type) assert type_info, custom_enum.underlying_type custom_enum.underlying_type_info = type_info custom_enums[custom_enum.name] = custom_enum # Create the custom structs custom_structs = OrderedDict() for custom_struct in itertools.chain(global_custom_structs, getattr(item, "custom_structs", [])): members = OrderedDict() for member in custom_struct.members: type_info = self._CreateTypeInfo(member.type) assert type_info, member.type assert member.name not in members, member.name members[member.name] = type_info custom_structs[custom_struct.name] = members # Create the configuration param type infos configuration_param_type_infos = [] for configuration_param in getattr(item, "configuration_params", []): if configuration_param.type in custom_enums: type_info = custom_enums[configuration_param.type].underlying_type_info configuration_param.is_enum = True else: type_info = self._CreateTypeInfo( configuration_param.type, custom_structs=custom_structs, custom_enums=custom_enums, ) assert type_info, configuration_param.type configuration_param_type_infos.append(type_info) input_type_info = self._CreateTypeInfo( item.input_type, custom_structs=custom_structs, custom_enums=custom_enums, ) assert input_type_info, item.input_type output_type_info = self._CreateTypeInfo( item.output_type, custom_structs=custom_structs, custom_enums=custom_enums, ) assert output_type_info, item.output_type dynamic_output_info = self._CreateTypeInfo( "vector<{}>".format(item.output_type), custom_structs=custom_structs, custom_enums=custom_enums, ) # Commit the results self.CustomStructs = custom_structs self.ConfigurationParamTypeInfos = configuration_param_type_infos self.InputTypeInfo = input_type_info self.OutputTypeInfo = output_type_info self.DynamicOutputTypeInfo = dynamic_output_info # ---------------------------------------------------------------------- @classmethod def EnumTypeInfoClasses(cls): cls._InitTypeInfoClasses() yield from cls._type_info_classes # ---------------------------------------------------------------------- # | # | Private Data # | # ---------------------------------------------------------------------- _type_info_classes = None # ---------------------------------------------------------------------- # | # | Private Methods # | # ---------------------------------------------------------------------- 
@classmethod def _InitTypeInfoClasses(cls): if cls._type_info_classes is not None: return from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo type_info_classes = [ DatetimeTypeInfo, MatrixTypeInfo, SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo, StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo, ] for compound_module in [ScalarTypeInfos, StructTypeInfos]: for obj_name in dir(compound_module): if ( obj_name.startswith("_") or not obj_name.endswith("TypeInfo") or obj_name == "TypeInfo" ): continue type_info_classes.append(getattr(compound_module, obj_name)) # Associate the type infos with the class rather than the instance # so that we only need to perform this initialization once. cls._type_info_classes = type_info_classes # ---------------------------------------------------------------------- @classmethod def _CreateTypeInfo(cls, the_type, *args, **kwargs): cls._InitTypeInfoClasses() is_optional = False if the_type.endswith("?"): the_type = the_type[:-1] is_optional = True type_info_class = None for this_type_info_class in cls._type_info_classes: if isinstance(this_type_info_class.TypeName, six.string_types): if this_type_info_class.TypeName == the_type: type_info_class = this_type_info_class break elif hasattr(this_type_info_class.TypeName, "match"): if this_type_info_class.TypeName.match(the_type): type_info_class = this_type_info_class break if type_info_class is None: return None return type_info_class( *args, member_type=the_type, is_optional=is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs )
normal
{ "blob_id": "d8befc4a79176aefcccd3dceddf04ca965601e5c", "index": 2856, "step-1": "<mask token>\n\n\[email protected]\nclass Plugin(PluginBase):\n <mask token>\n <mask token>\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n 
@classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n", "step-2": "<mask token>\n\n\[email protected]\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n 
items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, 
MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n", "step-3": "<mask token>\n\n\[email protected]\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\ndef _GenerateHeaderFile(open_file_func, output_dir, items,\n all_type_info_data, output_stream):\n with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.\n format(items[0].name)), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_{name}.h\"\n\n #include \"Traits.h\"\n #include \"Featurizers/Structs.h\"\n\n #include \"SharedLibraryTests_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n\n \"\"\"\n ).format(name=items[0].name))\n for item, type_info_data in zip(items, all_type_info_data):\n template = getattr(item, 'template', None)\n if template:\n suffix = '_{}_'.format(template)\n type_desc = ' <{}>'.format(template)\n cpp_template_suffix = '<{}>'.format(type_info_data.\n InputTypeInfo.CppType)\n else:\n suffix = '_'\n type_desc = ''\n cpp_template_suffix = ''\n if type_info_data.ConfigurationParamTypeInfos:\n constructor_template_params = ', typename... ConstructorArgTs'\n constructor_params = (\n ',\\n ConstructorArgTs &&... constructor_args')\n constructor_args = (\n 'std::forward<ConstructorArgTs>(constructor_args)..., ')\n else:\n constructor_template_params = ''\n constructor_params = ''\n constructor_args = ''\n fit_prefix_statements = ''\n transform_input_args = (type_info_data.InputTypeInfo.\n GetTransformInputArgs())\n if isinstance(transform_input_args, tuple):\n transform_input_args, fit_prefix_statements = (\n transform_input_args)\n if type_info_data.InputTypeInfo.TypeName == 'bool':\n for_loop = 'for(bool input : inference_input)'\n else:\n for_loop = 'for(auto const & input : inference_input)'\n if type_info_data.OutputTypeInfo.TypeName == 'bool':\n invocation_template = 'results.push_back({});'\n else:\n invocation_template = 'results.emplace_back({});'\n if item.has_dynamic_output:\n output_statement_info = (type_info_data.\n DynamicOutputTypeInfo.GetOutputInfo(invocation_template\n =invocation_template, result_name='results'))\n else:\n output_statement_info = (type_info_data.OutputTypeInfo.\n GetOutputInfo(invocation_template=invocation_template,\n result_name='results'))\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* | {name}{type_desc} */\n template <typename VectorInputT{constructor_template_params}>\n void {name}{suffix}Test(\n std::vector<VectorInputT> const &training_input,\n std::vector<VectorInputT> const &inference_input,\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\n ) {{\n ErrorInfoHandle * pErrorInfo(nullptr);\n\n // Create the estimator\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\n REQUIRE(pEstimatorHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Train\n if(training_input.empty() == false) {{\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\n\n while(true) {{\n TrainingState trainingState(0);\n\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(trainingState != Training)\n break;\n\n FitResult result(0);\n auto const & input(*iter);\n\n 
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(result == ResetAndContinue) {{\n iter = training_input.begin();\n continue;\n }}\n\n ++iter;\n if(iter == training_input.end()) {{\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n iter = training_input.begin();\n }}\n }}\n }}\n\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\n REQUIRE(pErrorInfo == nullptr);\n\n // Once here, training should be complete\n {{\n bool is_complete(false);\n\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n REQUIRE(is_complete);\n }}\n\n // Create the Transformer\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\n REQUIRE(pTransformerHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Destroy the estimator\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n \"\"\"\n ).format(name=item.name, type_desc=type_desc, suffix=suffix,\n vector_result_type=output_statement_info.VectorResultType,\n constructor_template_params=constructor_template_params,\n constructor_params=constructor_params, constructor_args=\n constructor_args, fit_input_args=transform_input_args,\n fit_prefix_statements='' if not fit_prefix_statements else\n \"\"\"{}\n\n \"\"\".format(StringHelpers.LeftJustify(\n fit_prefix_statements.rstrip(), 12))))\n inline_destroy_statement = '// No inline destroy statement'\n trailing_destroy_statement = '// No trailing destroy statement'\n if output_statement_info.DestroyArgs:\n if output_statement_info.DestroyInline:\n inline_destroy_statement = textwrap.dedent(\n \"\"\"\n // Destroy the contents\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs)\n else:\n trailing_destroy_statement = textwrap.dedent(\n \"\"\" for(auto & {var_name}: results) {{\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs, var_name=\n output_statement_info.DestroyVarName or 'result')\n if item.has_dynamic_output:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n\n if(true) {{\n {transform_vars}\n\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var 
in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n else:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n results.reserve(inference_input.size());\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n f.write(textwrap.dedent(\n \"\"\"\n REQUIRE(verify_func(results));\n\n {trailing_destroy_statement}\n\n // Destroy the transformer\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n trailing_destroy_statement=StringHelpers.LeftJustify(\n trailing_destroy_statement.rstrip(), 4)))\n f.write(textwrap.dedent(\n \"\"\" #if (defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, 
custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n", "step-4": "<mask token>\n\n\[email protected]\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, 
global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\ndef _GenerateHeaderFile(open_file_func, output_dir, items,\n all_type_info_data, output_stream):\n with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.\n format(items[0].name)), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. */\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_{name}.h\"\n\n #include \"Traits.h\"\n #include \"Featurizers/Structs.h\"\n\n #include \"SharedLibraryTests_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n\n \"\"\"\n ).format(name=items[0].name))\n for item, type_info_data in zip(items, all_type_info_data):\n template = getattr(item, 'template', None)\n if template:\n suffix = '_{}_'.format(template)\n type_desc = ' <{}>'.format(template)\n cpp_template_suffix = '<{}>'.format(type_info_data.\n InputTypeInfo.CppType)\n else:\n suffix = '_'\n type_desc = ''\n cpp_template_suffix = ''\n if type_info_data.ConfigurationParamTypeInfos:\n constructor_template_params = ', typename... ConstructorArgTs'\n constructor_params = (\n ',\\n ConstructorArgTs &&... 
constructor_args')\n constructor_args = (\n 'std::forward<ConstructorArgTs>(constructor_args)..., ')\n else:\n constructor_template_params = ''\n constructor_params = ''\n constructor_args = ''\n fit_prefix_statements = ''\n transform_input_args = (type_info_data.InputTypeInfo.\n GetTransformInputArgs())\n if isinstance(transform_input_args, tuple):\n transform_input_args, fit_prefix_statements = (\n transform_input_args)\n if type_info_data.InputTypeInfo.TypeName == 'bool':\n for_loop = 'for(bool input : inference_input)'\n else:\n for_loop = 'for(auto const & input : inference_input)'\n if type_info_data.OutputTypeInfo.TypeName == 'bool':\n invocation_template = 'results.push_back({});'\n else:\n invocation_template = 'results.emplace_back({});'\n if item.has_dynamic_output:\n output_statement_info = (type_info_data.\n DynamicOutputTypeInfo.GetOutputInfo(invocation_template\n =invocation_template, result_name='results'))\n else:\n output_statement_info = (type_info_data.OutputTypeInfo.\n GetOutputInfo(invocation_template=invocation_template,\n result_name='results'))\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* | {name}{type_desc} */\n template <typename VectorInputT{constructor_template_params}>\n void {name}{suffix}Test(\n std::vector<VectorInputT> const &training_input,\n std::vector<VectorInputT> const &inference_input,\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\n ) {{\n ErrorInfoHandle * pErrorInfo(nullptr);\n\n // Create the estimator\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\n REQUIRE(pEstimatorHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Train\n if(training_input.empty() == false) {{\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\n\n while(true) {{\n TrainingState trainingState(0);\n\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(trainingState != Training)\n break;\n\n FitResult result(0);\n auto const & input(*iter);\n\n {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(result == ResetAndContinue) {{\n iter = training_input.begin();\n continue;\n }}\n\n ++iter;\n if(iter == training_input.end()) {{\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n iter = training_input.begin();\n }}\n }}\n }}\n\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\n REQUIRE(pErrorInfo == nullptr);\n\n // Once here, training should be complete\n {{\n bool is_complete(false);\n\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n REQUIRE(is_complete);\n }}\n\n // Create the Transformer\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\n REQUIRE(pTransformerHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Destroy the estimator\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n \"\"\"\n ).format(name=item.name, type_desc=type_desc, suffix=suffix,\n 
vector_result_type=output_statement_info.VectorResultType,\n constructor_template_params=constructor_template_params,\n constructor_params=constructor_params, constructor_args=\n constructor_args, fit_input_args=transform_input_args,\n fit_prefix_statements='' if not fit_prefix_statements else\n \"\"\"{}\n\n \"\"\".format(StringHelpers.LeftJustify(\n fit_prefix_statements.rstrip(), 12))))\n inline_destroy_statement = '// No inline destroy statement'\n trailing_destroy_statement = '// No trailing destroy statement'\n if output_statement_info.DestroyArgs:\n if output_statement_info.DestroyInline:\n inline_destroy_statement = textwrap.dedent(\n \"\"\"\n // Destroy the contents\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs)\n else:\n trailing_destroy_statement = textwrap.dedent(\n \"\"\" for(auto & {var_name}: results) {{\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs, var_name=\n output_statement_info.DestroyVarName or 'result')\n if item.has_dynamic_output:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n\n if(true) {{\n {transform_vars}\n\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n else:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n results.reserve(inference_input.size());\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, 
var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n f.write(textwrap.dedent(\n \"\"\"\n REQUIRE(verify_func(results));\n\n {trailing_destroy_statement}\n\n // Destroy the transformer\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n trailing_destroy_statement=StringHelpers.LeftJustify(\n trailing_destroy_statement.rstrip(), 4)))\n f.write(textwrap.dedent(\n \"\"\" #if (defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n\n\ndef _GenerateCommonFiles(open_file_func, output_dir, output_stream):\n with open_file_func(os.path.join(output_dir,\n 'SharedLibraryTests_Common.hpp'), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. */\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n \"\"\"\n ))\n for type_info_class in TypeInfoData.EnumTypeInfoClasses():\n type_info_class.CreateHelperMethods(f)\n f.write(textwrap.dedent(\n \"\"\" #if (defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n return 0\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n 
input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n", "step-5": "# ----------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License\r\n# ----------------------------------------------------------------------\r\n\"\"\"Contains the Plugin object\"\"\"\r\n\r\nimport itertools\r\nimport os\r\nimport sys\r\nimport textwrap\r\n\r\nfrom collections import OrderedDict\r\n\r\nimport six\r\n\r\nimport CommonEnvironment\r\nfrom CommonEnvironment.CallOnExit import CallOnExit\r\nfrom CommonEnvironment import StringHelpers\r\nfrom CommonEnvironment import Interface\r\n\r\n# ----------------------------------------------------------------------\r\n_script_fullpath = CommonEnvironment.ThisFullpath()\r\n_script_dir, _script_name = os.path.split(_script_fullpath)\r\n# ----------------------------------------------------------------------\r\n\r\nsys.path.insert(0, os.path.join(_script_dir, \"..\"))\r\nwith CallOnExit(lambda: sys.path.pop(0)):\r\n from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase\r\n\r\n# ----------------------------------------------------------------------\r\[email protected]\r\nclass Plugin(PluginBase):\r\n # ----------------------------------------------------------------------\r\n # | Properties\r\n Name = Interface.DerivedProperty(\"SharedLibraryTests\")\r\n Description = Interface.DerivedProperty(\r\n \"Generates code used when testing the Shared Library import/export layer\",\r\n )\r\n\r\n # ----------------------------------------------------------------------\r\n # | Methods\r\n @staticmethod\r\n @Interface.override\r\n def Generate(\r\n open_file_func,\r\n global_custom_structs,\r\n global_custom_enums,\r\n data,\r\n output_dir,\r\n status_stream,\r\n ):\r\n result_code = 0\r\n\r\n status_stream.write(\"Preprocessing data...\")\r\n with status_stream.DoneManager():\r\n type_info_data = []\r\n\r\n for items in data:\r\n type_info_data.append([TypeInfoData(item, global_custom_structs, global_custom_enums) for item in items])\r\n\r\n status_stream.write(\"Generating Common Files...\")\r\n with status_stream.DoneManager() as this_dm:\r\n this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)\r\n if this_dm.result != 0:\r\n return this_dm.result\r\n\r\n for desc, func in [(\"Generating .h files...\", _GenerateHeaderFile)]:\r\n status_stream.write(desc)\r\n with status_stream.DoneManager(\r\n suffix=\"\\n\",\r\n ) as dm:\r\n for index, (items, items_type_info_data) in enumerate(\r\n zip(data, type_info_data),\r\n ):\r\n dm.stream.write(\r\n \"Processing '{}' ({} of {})...\".format(\r\n items[0].name,\r\n index + 1,\r\n len(data),\r\n ),\r\n )\r\n with dm.stream.DoneManager() as this_dm:\r\n this_dm.result = func(\r\n open_file_func,\r\n output_dir,\r\n items,\r\n items_type_info_data,\r\n this_dm.stream,\r\n )\r\n\r\n if dm.result < 0:\r\n return dm.result\r\n\r\n result_code = result_code or dm.result\r\n\r\n return result_code\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\ndef _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream):\r\n with open_file_func(\r\n os.path.join(output_dir, \"SharedLibraryTests_{}.h\".format(items[0].name)),\r\n \"w\",\r\n ) as f:\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\r\n /* Licensed under the MIT License */\r\n /* ---------------------------------------------------------------------- */\r\n #pragma once\r\n\r\n #include \"SharedLibrary_{name}.h\"\r\n\r\n #include \"Traits.h\"\r\n #include \"Featurizers/Structs.h\"\r\n\r\n #include \"SharedLibraryTests_Common.hpp\"\r\n\r\n #if (defined _MSC_VER)\r\n # pragma warning(push)\r\n\r\n // I don't know why MSVC thinks that there is unreachable\r\n // code in these methods during release builds.\r\n # pragma warning(disable: 4702) // Unreachable code\r\n\r\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\r\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\r\n #endif\r\n\r\n \"\"\",\r\n ).format(\r\n name=items[0].name,\r\n ),\r\n )\r\n\r\n for item, type_info_data in zip(items, all_type_info_data):\r\n template = getattr(item, \"template\", None)\r\n if template:\r\n suffix = \"_{}_\".format(template)\r\n type_desc = \" <{}>\".format(template)\r\n cpp_template_suffix = \"<{}>\".format(\r\n type_info_data.InputTypeInfo.CppType,\r\n )\r\n else:\r\n suffix = \"_\"\r\n type_desc = \"\"\r\n cpp_template_suffix = \"\"\r\n\r\n if type_info_data.ConfigurationParamTypeInfos:\r\n constructor_template_params = \", typename... ConstructorArgTs\"\r\n constructor_params = \",\\n ConstructorArgTs &&... constructor_args\"\r\n constructor_args = \"std::forward<ConstructorArgTs>(constructor_args)..., \"\r\n else:\r\n constructor_template_params = \"\"\r\n constructor_params = \"\"\r\n constructor_args = \"\"\r\n\r\n fit_prefix_statements = \"\"\r\n\r\n transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs()\r\n if isinstance(transform_input_args, tuple):\r\n transform_input_args, fit_prefix_statements = transform_input_args\r\n\r\n # Special processing for vector<bool>\r\n if type_info_data.InputTypeInfo.TypeName == \"bool\":\r\n # vector<bool> isn't actually a bool, so we can't take a direct reference to it\r\n for_loop = \"for(bool input : inference_input)\"\r\n else:\r\n for_loop = \"for(auto const & input : inference_input)\"\r\n\r\n if type_info_data.OutputTypeInfo.TypeName == \"bool\":\r\n # vector<bool> doesn't support emplace_back on some platforms\r\n invocation_template = \"results.push_back({});\"\r\n else:\r\n invocation_template = \"results.emplace_back({});\"\r\n\r\n # Get the output statement information\r\n if item.has_dynamic_output:\r\n output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo(\r\n invocation_template=invocation_template,\r\n result_name=\"results\",\r\n )\r\n else:\r\n output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo(\r\n invocation_template=invocation_template,\r\n result_name=\"results\",\r\n )\r\n\r\n # Write the training statements\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* | {name}{type_desc} */\r\n template <typename VectorInputT{constructor_template_params}>\r\n void {name}{suffix}Test(\r\n std::vector<VectorInputT> const &training_input,\r\n std::vector<VectorInputT> const &inference_input,\r\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\r\n ) {{\r\n ErrorInfoHandle * pErrorInfo(nullptr);\r\n\r\n // Create the estimator\r\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\r\n\r\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\r\n 
REQUIRE(pEstimatorHandle != nullptr);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Train\r\n if(training_input.empty() == false) {{\r\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\r\n\r\n while(true) {{\r\n TrainingState trainingState(0);\r\n\r\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n if(trainingState != Training)\r\n break;\r\n\r\n FitResult result(0);\r\n auto const & input(*iter);\r\n\r\n {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n if(result == ResetAndContinue) {{\r\n iter = training_input.begin();\r\n continue;\r\n }}\r\n\r\n ++iter;\r\n if(iter == training_input.end()) {{\r\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n iter = training_input.begin();\r\n }}\r\n }}\r\n }}\r\n\r\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Once here, training should be complete\r\n {{\r\n bool is_complete(false);\r\n\r\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n REQUIRE(is_complete);\r\n }}\r\n\r\n // Create the Transformer\r\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\r\n\r\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\r\n REQUIRE(pTransformerHandle != nullptr);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Destroy the estimator\r\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n type_desc=type_desc,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n constructor_template_params=constructor_template_params,\r\n constructor_params=constructor_params,\r\n constructor_args=constructor_args,\r\n fit_input_args=transform_input_args,\r\n fit_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements.rstrip(),\r\n 12,\r\n ),\r\n ),\r\n ),\r\n )\r\n\r\n # Write the inferencing statements\r\n inline_destroy_statement = \"// No inline destroy statement\"\r\n trailing_destroy_statement = \"// No trailing destroy statement\"\r\n\r\n if output_statement_info.DestroyArgs:\r\n if output_statement_info.DestroyInline:\r\n inline_destroy_statement = textwrap.dedent(\r\n \"\"\"\\\r\n\r\n // Destroy the contents\r\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n args=output_statement_info.DestroyArgs,\r\n )\r\n else:\r\n trailing_destroy_statement = textwrap.dedent(\r\n \"\"\"\\\r\n for(auto & {var_name}: results) {{\r\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n args=output_statement_info.DestroyArgs,\r\n var_name=output_statement_info.DestroyVarName or \"result\",\r\n )\r\n\r\n if item.has_dynamic_output:\r\n f.write(\r\n StringHelpers.LeftJustify(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n // Inference\r\n std::vector<{vector_result_type}> results;\r\n\r\n {for_loop} {{\r\n 
{transform_prefix_statements}{transform_vars}\r\n\r\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n\r\n if(true) {{\r\n {transform_vars}\r\n\r\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n for_loop=for_loop,\r\n transform_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements,\r\n 4,\r\n ).rstrip(),\r\n ),\r\n transform_vars=StringHelpers.LeftJustify(\r\n \"\\n\".join(\r\n [\r\n \"{} {};\".format(var.Type, var.Name)\r\n for var in output_statement_info.TransformVars\r\n ]\r\n ),\r\n 4,\r\n ),\r\n transform_input_args=transform_input_args,\r\n transform_output_args=\", \".join([\"&{}\".format(p.Name) for p in output_statement_info.TransformVars]),\r\n transform_statement=StringHelpers.LeftJustify(\r\n output_statement_info.AppendResultStatement.rstrip(),\r\n 4,\r\n ),\r\n inline_destroy_statement=StringHelpers.LeftJustify(\r\n inline_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n 4,\r\n skip_first_line=False,\r\n ),\r\n )\r\n else:\r\n f.write(\r\n StringHelpers.LeftJustify(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n // Inference\r\n std::vector<{vector_result_type}> results;\r\n\r\n results.reserve(inference_input.size());\r\n\r\n {for_loop} {{\r\n {transform_prefix_statements}{transform_vars}\r\n\r\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n for_loop=for_loop,\r\n transform_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements,\r\n 4,\r\n ).rstrip(),\r\n ),\r\n transform_vars=StringHelpers.LeftJustify(\r\n \"\\n\".join(\r\n [\r\n \"{} {};\".format(var.Type, var.Name)\r\n for var in output_statement_info.TransformVars\r\n ]\r\n ),\r\n 4,\r\n ),\r\n transform_input_args=transform_input_args,\r\n transform_output_args=\", \".join([\"&{}\".format(p.Name) for p in output_statement_info.TransformVars]),\r\n transform_statement=StringHelpers.LeftJustify(\r\n output_statement_info.AppendResultStatement.rstrip(),\r\n 4,\r\n ),\r\n inline_destroy_statement=StringHelpers.LeftJustify(\r\n inline_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n 4,\r\n skip_first_line=False,\r\n ),\r\n )\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n\r\n REQUIRE(verify_func(results));\r\n\r\n {trailing_destroy_statement}\r\n\r\n // Destroy the transformer\r\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n }}\r\n\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n trailing_destroy_statement=StringHelpers.LeftJustify(\r\n trailing_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n )\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n #if (defined _MSC_VER)\r\n # pragma warning(pop)\r\n #endif\r\n 
\"\"\",\r\n ),\r\n )\r\n\r\n\r\n# ----------------------------------------------------------------------\r\ndef _GenerateCommonFiles(open_file_func, output_dir, output_stream):\r\n with open_file_func(\r\n os.path.join(output_dir, \"SharedLibraryTests_Common.hpp\"),\r\n \"w\",\r\n ) as f:\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* Copyright (c) Microsoft Corporation. All rights reserved. */\r\n /* Licensed under the MIT License */\r\n /* ---------------------------------------------------------------------- */\r\n #pragma once\r\n\r\n #include \"SharedLibrary_Common.hpp\"\r\n\r\n #if (defined _MSC_VER)\r\n # pragma warning(push)\r\n\r\n // I don't know why MSVC thinks that there is unreachable\r\n // code in these methods during release builds.\r\n # pragma warning(disable: 4702) // Unreachable code\r\n\r\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\r\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\r\n #endif\r\n \"\"\",\r\n ),\r\n )\r\n\r\n for type_info_class in TypeInfoData.EnumTypeInfoClasses():\r\n type_info_class.CreateHelperMethods(f)\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n #if (defined _MSC_VER)\r\n # pragma warning(pop)\r\n #endif\r\n \"\"\",\r\n ),\r\n )\r\n\r\n return 0\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\nclass TypeInfoData(object):\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Public Methods\r\n # |\r\n # ----------------------------------------------------------------------\r\n def __init__(self, item, global_custom_structs, global_custom_enums):\r\n # Create the custom enums\r\n custom_enums = OrderedDict()\r\n\r\n for custom_enum in itertools.chain(global_custom_enums, getattr(item, \"custom_enums\", [])):\r\n if isinstance(custom_enum.underlying_type, six.string_types):\r\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\r\n assert type_info, custom_enum.underlying_type\r\n\r\n custom_enum.underlying_type_info = type_info\r\n\r\n custom_enums[custom_enum.name] = custom_enum\r\n\r\n # Create the custom structs\r\n custom_structs = OrderedDict()\r\n\r\n for custom_struct in itertools.chain(global_custom_structs, getattr(item, \"custom_structs\", [])):\r\n members = OrderedDict()\r\n\r\n for member in custom_struct.members:\r\n type_info = self._CreateTypeInfo(member.type)\r\n assert type_info, member.type\r\n\r\n assert member.name not in members, member.name\r\n members[member.name] = type_info\r\n\r\n custom_structs[custom_struct.name] = members\r\n\r\n # Create the configuration param type infos\r\n configuration_param_type_infos = []\r\n\r\n for configuration_param in getattr(item, \"configuration_params\", []):\r\n if configuration_param.type in custom_enums:\r\n type_info = custom_enums[configuration_param.type].underlying_type_info\r\n configuration_param.is_enum = True\r\n\r\n else:\r\n type_info = self._CreateTypeInfo(\r\n configuration_param.type,\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert type_info, configuration_param.type\r\n\r\n configuration_param_type_infos.append(type_info)\r\n\r\n input_type_info = self._CreateTypeInfo(\r\n item.input_type,\r\n 
custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert input_type_info, item.input_type\r\n\r\n output_type_info = self._CreateTypeInfo(\r\n item.output_type,\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert output_type_info, item.output_type\r\n\r\n dynamic_output_info = self._CreateTypeInfo(\r\n \"vector<{}>\".format(item.output_type),\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n\r\n # Commit the results\r\n self.CustomStructs = custom_structs\r\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\r\n self.InputTypeInfo = input_type_info\r\n self.OutputTypeInfo = output_type_info\r\n self.DynamicOutputTypeInfo = dynamic_output_info\r\n\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def EnumTypeInfoClasses(cls):\r\n cls._InitTypeInfoClasses()\r\n yield from cls._type_info_classes\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Private Data\r\n # |\r\n # ----------------------------------------------------------------------\r\n _type_info_classes = None\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Private Methods\r\n # |\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def _InitTypeInfoClasses(cls):\r\n if cls._type_info_classes is not None:\r\n return\r\n\r\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\r\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\r\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\r\n\r\n type_info_classes = [\r\n DatetimeTypeInfo,\r\n MatrixTypeInfo,\r\n SingleValueSparseVectorTypeInfo,\r\n SparseVectorTypeInfo,\r\n StringTypeInfo,\r\n TupleTypeInfo,\r\n UniqueIdTypeInfo,\r\n VectorTypeInfo,\r\n ]\r\n\r\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\r\n for obj_name in dir(compound_module):\r\n if (\r\n obj_name.startswith(\"_\")\r\n or not obj_name.endswith(\"TypeInfo\")\r\n or obj_name == \"TypeInfo\"\r\n ):\r\n continue\r\n\r\n type_info_classes.append(getattr(compound_module, obj_name))\r\n\r\n # Associate the type infos with the class rather than the instance\r\n # so that we only need to perform this initialization once.\r\n cls._type_info_classes = type_info_classes\r\n\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\r\n cls._InitTypeInfoClasses()\r\n\r\n is_optional = False\r\n\r\n if the_type.endswith(\"?\"):\r\n the_type = the_type[:-1]\r\n is_optional = True\r\n\r\n type_info_class = None\r\n\r\n for this_type_info_class in cls._type_info_classes:\r\n if isinstance(this_type_info_class.TypeName, six.string_types):\r\n if this_type_info_class.TypeName 
== the_type:\r\n type_info_class = this_type_info_class\r\n break\r\n\r\n elif hasattr(this_type_info_class.TypeName, \"match\"):\r\n if this_type_info_class.TypeName.match(the_type):\r\n type_info_class = this_type_info_class\r\n break\r\n\r\n if type_info_class is None:\r\n return None\r\n\r\n return type_info_class(\r\n *args,\r\n member_type=the_type,\r\n is_optional=is_optional,\r\n create_type_info_func=cls._CreateTypeInfo,\r\n **kwargs\r\n )\r\n", "step-ids": [ 8, 9, 10, 11, 15 ] }
[ 8, 9, 10, 11, 15 ]
import csv
import sys

if len(sys.argv[1:]) == 5 :
    (name_pos, start_pos, length_pos,
     first_note_pos, second_note_pos) = [int(pos) for pos in sys.argv[1:]]
elif len(sys.argv[1:]) == 4 :
    (name_pos, start_pos, length_pos,
     first_note_pos) = [int(pos) for pos in sys.argv[1:]]
    second_note_pos = None
else :
    name_pos, start_pos, length_pos, first_note_pos, second_note_pos = 5, 3, 4, 2, 1

blacklist=("Blank", "semicolon filler")

reader = csv.reader(sys.stdin)
writer = csv.writer(sys.stdout)
writer.writerow(('column', 'start', 'length'))

for row in reader :
    try :
        if not row[name_pos].strip() or row[name_pos].strip() in blacklist :
            continue
    except IndexError :
        continue
    if second_note_pos is not None and row[second_note_pos].strip() :
        col_name = '; '.join(name.strip() for name in (row[name_pos],
                                                       row[first_note_pos],
                                                       row[second_note_pos]))
    elif row[first_note_pos].strip() :
        col_name = '; '.join(name.strip() for name in (row[name_pos],
                                                       row[first_note_pos]))
    else :
        col_name = row[name_pos].strip()
    col_start = int(row[start_pos].split('-')[0].strip())
    col_length = int(float(row[length_pos])) - 1
    writer.writerow((col_name, col_start, col_length))
normal
{ "blob_id": "d7653a205fb8203fed4009846780c63dd1bcb505", "index": 3603, "step-1": "<mask token>\n", "step-2": "<mask token>\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\n<mask token>\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n", "step-3": "<mask token>\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\nblacklist = 'Blank', 'semicolon filler'\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n", "step-4": "import csv\nimport sys\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\nblacklist = 'Blank', 'semicolon filler'\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 
1\n writer.writerow((col_name, col_start, col_length))\n", "step-5": "import csv\nimport sys\n\nif len(sys.argv[1:]) == 5 :\n (name_pos, start_pos, length_pos, \n first_note_pos, second_note_pos) = [int(pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4 :\n (name_pos, start_pos, length_pos, \n first_note_pos) = [int(pos) for pos in sys.argv[1:]]\n second_note_pos = None\nelse :\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = 5, 3, 4, 2, 1\n\nblacklist=(\"Blank\", \"semicolon filler\")\n\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\n\nfor row in reader :\n try :\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist :\n continue\n except IndexError :\n continue\n if second_note_pos is not None and row[second_note_pos].strip() :\n col_name = '; '.join(name.strip() for name in (row[name_pos], \n row[first_note_pos], \n row[second_note_pos]))\n elif row[first_note_pos].strip() :\n col_name = '; '.join(name.strip() for name in (row[name_pos], \n row[first_note_pos]))\n else :\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from gerador_senha import gerar_senha
gerar_senha()
normal
{ "blob_id": "e81da535408cc36655328b37ca99b4f775f3a78e", "index": 8435, "step-1": "<mask token>\n", "step-2": "<mask token>\ngerar_senha()\n", "step-3": "from gerador_senha import gerar_senha\ngerar_senha()\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import logging
from .const import (
    DOMAIN,
    CONF_SCREENS
)
from typing import Any, Callable, Dict, Optional
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import (
    ConfigType,
    DiscoveryInfoType,
    HomeAssistantType,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
from .dgus_protocol import create_protocol

_LOGGER = logging.getLogger(__name__)


async def async_setup_platform(
    hass: HomeAssistantType,
    config: ConfigType,
    async_add_entities: Callable,
    discovery_info: Optional[DiscoveryInfoType] = None,
) -> None:
    sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]
    async_add_entities(sensors, update_before_add=True)


class StateConverters:
    @staticmethod
    def extract_attr(state, attr):
        if attr:
            return state.attributes[attr]
        else:
            return state.as_dict()['state']

    @staticmethod
    def send_int(state, settings, protocol):
        vp = settings['vp']
        attr = settings.get('attribute', None)
        try:
            value = int(float(StateConverters.extract_attr(state, attr)))
            protocol.write_vp(vp, value)
        except Exception as er:
            _LOGGER.error("Can't send value: %s", str(er))

    @staticmethod
    def send_map(state, settings, protocol):
        vp = settings['vp']
        map_state = settings['map']
        attr = settings.get('attribute', None)
        key = str(StateConverters.extract_attr(state, attr))
        value = int(map_state[key])
        protocol.write_vp(vp, value)


class DGUSScreen(Entity):
    def __init__(self, hass, screen):
        self._state = None
        self._hass = hass
        self._name = screen['name']
        self._state_track_settings = {
            entry['entity_id']: entry for entry in screen.get('show_states', [])}
        try:
            self._protocol = create_protocol(
                screen['port_name'], screen['bound_rate'], self.on_data)
        except Exception as er:
            _LOGGER.error("Can't open serial port %s, : %s",
                          screen['port_name'], str(er))

        entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
        async_track_state_change(hass, entiti_ids, self.state_listener)

    def state_listener(self, entity, old_state, new_state):
        settings = self._state_track_settings[entity]
        if settings['type'] == 'int':
            StateConverters.send_int(
                new_state, settings, self._protocol.protocol)
        elif settings['type'] == 'map':
            StateConverters.send_map(
                new_state, settings, self._protocol.protocol)

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        return self._state

    def on_data(self, vp, value):
        """fire event for data, received from screen"""
        eventName = self.name + "_set_vp"
        self._hass.bus.fire(eventName, {"vp": vp, "value": value})
normal
{ "blob_id": "6f1b08a5ae1a07a30d89f3997461f4f97658f364", "index": 4920, "step-1": "<mask token>\n\n\nclass StateConverters:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n", "step-2": "<mask token>\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n", "step-3": "<mask token>\n\n\nasync def async_setup_platform(hass: HomeAssistantType, config: ConfigType,\n async_add_entities: Callable, discovery_info: Optional[\n 
DiscoveryInfoType]=None) ->None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n", "step-4": "import logging\nfrom .const import DOMAIN, CONF_SCREENS\nfrom typing import Any, Callable, Dict, Optional\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, HomeAssistantType\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.event import async_track_state_change\nfrom .dgus_protocol import create_protocol\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass: HomeAssistantType, config: ConfigType,\n async_add_entities: Callable, discovery_info: Optional[\n DiscoveryInfoType]=None) ->None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = 
int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n", "step-5": "import logging\nfrom .const import (\n DOMAIN,\n CONF_SCREENS\n)\nfrom typing import Any, Callable, Dict, Optional\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.typing import (\n ConfigType,\n DiscoveryInfoType,\n HomeAssistantType,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.event import async_track_state_change\nfrom .dgus_protocol import create_protocol\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(\n hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities: Callable,\n discovery_info: Optional[DiscoveryInfoType] = None,\n) -> None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {\n entry['entity_id']: entry for entry in screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(\n screen['port_name'], screen['bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\",\n screen['port_name'], str(er))\n \n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(\n 
new_state, settings, self._protocol.protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(\n new_state, settings, self._protocol.protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + \"_set_vp\"\n self._hass.bus.fire(eventName, {\"vp\": vp, \"value\": value})\n", "step-ids": [ 7, 10, 11, 13, 14 ] }
[ 7, 10, 11, 13, 14 ]
def IsPn(a):
    temp = (24*a+1)**0.5+1
    if temp % 6 == 0:
        return True
    else:
        return False

def IsHn(a):
    temp = (8*a+1)**0.5+1
    if temp % 4 == 0:
        return True
    else:
        return False

def CalTn(a):
    return (a**2+a)/2

i = 286
while 1:
    temp = CalTn(i)
    if IsHn(temp) and IsPn(temp):
        break
    i += 1

print i,temp
normal
{ "blob_id": "7474e60feff61c4ef15680ecc09d910e6e1d6322", "index": 4603, "step-1": "def IsPn(a):\n\ttemp = (24*a+1)**0.5+1\n\tif temp % 6 == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef IsHn(a):\n\ttemp = (8*a+1)**0.5+1\n\tif temp % 4 == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef CalTn(a):\n\treturn (a**2+a)/2\n\ni = 286\nwhile 1:\n\ttemp = CalTn(i)\n\tif IsHn(temp) and IsPn(temp):\n\t\tbreak\n\ti += 1\n\nprint i,temp ", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
""" Given two strings, a and b, that may or may not be of the same length, determine the minimum number of character deletions required to make a and b anagrams. Any characters can be deleted from either of the strings. """ from collections import Counter import math import os import random import re import sys # Complete the makeAnagram function below. def makeAnagram(a, b): ct_a = Counter(a) ct_b = Counter(b) ct_a.subtract(ct_b) return sum(abs(i) for i in ct_a.values()) if __name__ == '__main__': a="cde" b="abc" res = makeAnagram(a, b) print(res)
normal
{ "blob_id": "3b15767988f1d958fc456f7966f425f93deb9017", "index": 8302, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef makeAnagram(a, b):\n ct_a = Counter(a)\n ct_b = Counter(b)\n ct_a.subtract(ct_b)\n return sum(abs(i) for i in ct_a.values())\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef makeAnagram(a, b):\n ct_a = Counter(a)\n ct_b = Counter(b)\n ct_a.subtract(ct_b)\n return sum(abs(i) for i in ct_a.values())\n\n\nif __name__ == '__main__':\n a = 'cde'\n b = 'abc'\n res = makeAnagram(a, b)\n print(res)\n", "step-4": "<mask token>\nfrom collections import Counter\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef makeAnagram(a, b):\n ct_a = Counter(a)\n ct_b = Counter(b)\n ct_a.subtract(ct_b)\n return sum(abs(i) for i in ct_a.values())\n\n\nif __name__ == '__main__':\n a = 'cde'\n b = 'abc'\n res = makeAnagram(a, b)\n print(res)\n", "step-5": "\"\"\"\nGiven two strings, a and b, that may or may not be of the same length, \ndetermine the minimum number of character deletions required to make\na and b anagrams. Any characters can be deleted from either of the strings.\n\"\"\"\nfrom collections import Counter\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the makeAnagram function below.\ndef makeAnagram(a, b):\n ct_a = Counter(a)\n ct_b = Counter(b)\n ct_a.subtract(ct_b)\n return sum(abs(i) for i in ct_a.values())\n\nif __name__ == '__main__':\n a=\"cde\"\n b=\"abc\"\n res = makeAnagram(a, b)\n print(res)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]