code (string, 13–1.2M chars) | order_type (string, 1 class) | original_example (dict) | step_ids (list, 1–5 items)
---|---|---|---|
from aws_cdk import core as cdk
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam,
aws_ecs_patterns as ecs_patterns)
class kdECSDemo(cdk.Stack):
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Create the VPC and the ECS cluster
# TODO: even when max_azs is specified, only 2 AZs are deployed
vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)
# Create the Task Definition
task_definition = ecs.FargateTaskDefinition(self, "ECSDemoTaskDefinition",
task_role=iam.Role.from_role_arn(self, "fargate_task_role", "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
execution_role=iam.Role.from_role_arn(self, "fargate_task_execution_role", "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole")
)
task_definition.add_volume(name="data")
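# The "data" volume is shared with the FireLens log router below: both
# containers mount it at /data/logs so the router can read the app's log files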
# App Container
app_container = task_definition.add_container(
"AppContainer",
image=ecs.ContainerImage.from_ecr_repository(
ecr.Repository.from_repository_name(self, id="app-file-image", repository_name="app-file")
),
logging=ecs.FireLensLogDriver()
)
app_container.add_mount_points(ecs.MountPoint(
container_path="/data/logs",
read_only=False,
source_volume="data"
))
# app_container.add_port_mappings(ecs.PortMapping(container_port=80))
# Log Router
fluentbit_container = ecs.FirelensLogRouter(self, "fluentbit_container",
firelens_config=ecs.FirelensConfig(
type=ecs.FirelensLogRouterType.FLUENTBIT,
options=ecs.FirelensOptions(
config_file_value="/extra.conf"
)
),
task_definition=task_definition,
image=ecs.ContainerImage.from_ecr_repository(
ecr.Repository.from_repository_name(self, id="log-router", repository_name="firelens-file")
),
logging=ecs.AwsLogDriver(stream_prefix="/ecs/firelens-fluentbit-demo/")
)
fluentbit_container.add_mount_points(ecs.MountPoint(
container_path="/data/logs",
read_only=False,
source_volume="data"
))
# # Create the Service
# ecs_patterns.ApplicationLoadBalancedFargateService(self, "ServiceWithLogging",
# cluster=cluster,
# desired_count=1, # Default is 1
# task_definition=task_definition,
# public_load_balancer=True) # Default is False
|
normal
|
{
"blob_id": "12cd3dbf211b202d25dc6f940156536c9fe3f76f",
"index": 3385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass kdECSDemo(cdk.Stack):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs\n ) ->None:\n super().__init__(scope, construct_id, **kwargs)\n vpc = ec2.Vpc(self, 'ECSVPC', cidr='10.0.0.0/16')\n cluster = ecs.Cluster(self, 'ECSCluster', vpc=vpc)\n task_definition = ecs.FargateTaskDefinition(self,\n 'ECSDemoTaskDefinition', task_role=iam.Role.from_role_arn(self,\n 'fargate_task_role',\n 'arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens'),\n execution_role=iam.Role.from_role_arn(self,\n 'fargate_task_execution_role',\n 'arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole'))\n task_definition.add_volume(name='data')\n app_container = task_definition.add_container('AppContainer', image\n =ecs.ContainerImage.from_ecr_repository(ecr.Repository.\n from_repository_name(self, id='app-file-image', repository_name\n ='app-file')), logging=ecs.FireLensLogDriver())\n app_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n fluentbit_container = ecs.FirelensLogRouter(self,\n 'fluentbit_container', firelens_config=ecs.FirelensConfig(type=\n ecs.FirelensLogRouterType.FLUENTBIT, options=ecs.\n FirelensOptions(config_file_value='/extra.conf')),\n task_definition=task_definition, image=ecs.ContainerImage.\n from_ecr_repository(ecr.Repository.from_repository_name(self,\n id='log-router', repository_name='firelens-file')), logging=ecs\n .AwsLogDriver(stream_prefix='/ecs/firelens-fluentbit-demo/'))\n fluentbit_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n",
"step-4": "from aws_cdk import core as cdk\nfrom aws_cdk import core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam, aws_ecs_patterns as ecs_patterns\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs\n ) ->None:\n super().__init__(scope, construct_id, **kwargs)\n vpc = ec2.Vpc(self, 'ECSVPC', cidr='10.0.0.0/16')\n cluster = ecs.Cluster(self, 'ECSCluster', vpc=vpc)\n task_definition = ecs.FargateTaskDefinition(self,\n 'ECSDemoTaskDefinition', task_role=iam.Role.from_role_arn(self,\n 'fargate_task_role',\n 'arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens'),\n execution_role=iam.Role.from_role_arn(self,\n 'fargate_task_execution_role',\n 'arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole'))\n task_definition.add_volume(name='data')\n app_container = task_definition.add_container('AppContainer', image\n =ecs.ContainerImage.from_ecr_repository(ecr.Repository.\n from_repository_name(self, id='app-file-image', repository_name\n ='app-file')), logging=ecs.FireLensLogDriver())\n app_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n fluentbit_container = ecs.FirelensLogRouter(self,\n 'fluentbit_container', firelens_config=ecs.FirelensConfig(type=\n ecs.FirelensLogRouterType.FLUENTBIT, options=ecs.\n FirelensOptions(config_file_value='/extra.conf')),\n task_definition=task_definition, image=ecs.ContainerImage.\n from_ecr_repository(ecr.Repository.from_repository_name(self,\n id='log-router', repository_name='firelens-file')), logging=ecs\n .AwsLogDriver(stream_prefix='/ecs/firelens-fluentbit-demo/'))\n fluentbit_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n",
"step-5": "from aws_cdk import core as cdk\n\n# For consistency with other languages, `cdk` is the preferred import name for\n# the CDK's core module. The following line also imports it as `core` for use\n# with examples from the CDK Developer's Guide, which are in the process of\n# being updated to use `cdk`. You may delete this import if you don't need it.\nfrom aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam,\n aws_ecs_patterns as ecs_patterns)\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # 建VPC与ECS Cluster\n # TODO: 即使指定 max_azs, 也只能部署2个AZ\n vpc = ec2.Vpc(self, \"ECSVPC\", cidr='10.0.0.0/16') \n cluster = ecs.Cluster(self, \"ECSCluster\", vpc=vpc)\n\n #建Task Definition\n task_definition = ecs.FargateTaskDefinition(self, \"ECSDemoTaskDefinition\",\n task_role=iam.Role.from_role_arn(self, \"fargate_task_role\", \"arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens\"),\n execution_role=iam.Role.from_role_arn(self, \"fargate_task_execution_role\", \"arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole\")\n )\n\n task_definition.add_volume(name=\"data\")\n\n # App Container\n app_container = task_definition.add_container(\n \"AppContainer\",\n image=ecs.ContainerImage.from_ecr_repository(\n ecr.Repository.from_repository_name(self, id=\"app-file-image\", repository_name=\"app-file\")\n ), \n logging=ecs.FireLensLogDriver()\n )\n\n app_container.add_mount_points(ecs.MountPoint(\n container_path=\"/data/logs\",\n read_only=False,\n source_volume=\"data\"\n ))\n\n # app_container.add_port_mappings(ecs.PortMapping(container_port=80))\n \n # Log Router\n fluentbit_container = ecs.FirelensLogRouter(self, \"fluentbit_container\",\n firelens_config=ecs.FirelensConfig(\n type=ecs.FirelensLogRouterType.FLUENTBIT,\n options=ecs.FirelensOptions(\n config_file_value=\"/extra.conf\"\n )\n ),\n task_definition=task_definition,\n image=ecs.ContainerImage.from_ecr_repository(\n ecr.Repository.from_repository_name(self, id=\"log-router\", repository_name=\"firelens-file\")\n ),\n logging=ecs.AwsLogDriver(stream_prefix=\"/ecs/firelens-fluentbit-demo/\")\n )\n \n fluentbit_container.add_mount_points(ecs.MountPoint(\n container_path=\"/data/logs\",\n read_only=False,\n source_volume=\"data\"\n ))\n\n # #建Service\n # ecs_patterns.ApplicationLoadBalancedFargateService(self, \"ServiceWithLogging\",\n # cluster=cluster,\n # desired_count=1, # Default is 1\n # task_definition=task_definition,\n # public_load_balancer=True) # Default is False\n\n \n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, request, render_template
from random import choice, sample
app = Flask(__name__)
horoscopes = [
'your day will be awesome',
'your day will be terrific',
'your day will be fantastic',
'neato, you have a fantabulous day ahead',
'your day will be oh-so-not-meh',
'this day will be brilliant',
'looks like today is just ducky',
'I proclaim your day to be INCREDIBLE',
'this day will be wonderful',
'smash this day',
'this day shall be lovely',
'your day will be just satenacious']
@app.route('/')
def index():
"""Show the homepage and ask the user's name."""
return render_template('index.html')
@app.route('/horoscope')
def get_horoscope():
"""Give the user a horoscope"""
name = request.args.get('name')
num_horoscopes = int(request.args.get('num_horoscopes'))
show_horoscopes = request.args.get('show_horoscopes')
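# sample() picks num_horoscopes distinct predictions without replacement;
# it raises ValueError if num_horoscopes exceeds len(horoscopes)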
horoscopes_to_show = sample(horoscopes, num_horoscopes)
# predictions = ', '.join(sample(horoscopes, num_horoscopes))
return render_template(
'horoscopes.html',
name=name,
show_horoscopes=show_horoscopes,
horoscopes_to_show=horoscopes_to_show)
"""
if show_horoscopes:
return f"Hello there, {name}: {predictions}."
else:
return f"Hello there, {name}! Have a nice day!"
"""
if __name__ == "__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "09d32b48ae88b1066dd0aa435a351c4fb1fc04ec",
"index": 9759,
"step-1": "from flask import Flask, request, render_template\nfrom random import choice, sample\n\napp = Flask(__name__)\n\nhoroscopes = [\n 'your day will be awesome',\n 'your day will be terrific',\n 'your day will be fantastic',\n 'neato, you have a fantabulous day ahead',\n 'your day will be oh-so-not-meh',\n 'this day will be brilliant',\n 'looks like today is just ducky',\n 'I proclaim your day to be INCREDIBLE',\n 'this day will be wonderful',\n 'smash this day',\n 'this day shall be lovely',\n 'your day will be just satenacious']\n\n\[email protected]('/')\ndef index():\n \"\"\"Show the homepage and ask the user's name.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/horoscope')\ndef get_horoscope():\n \"\"\"Give the user a horoscope\"\"\"\n name = request.args.get('name')\n num_horoscopes = int(request.args.get('num_horoscopes'))\n show_horoscopes = request.args.get('show_horoscopes')\n horoscopes_to_show = sample(horoscopes, num_horoscopes)\n # predictions = ', '.join(sample(horoscopes, num_horoscopes))\n\n return render_template(\n 'horoscopes.html',\n name=name,\n show_horoscopes=show_horoscopes,\n horoscopes_to_show=horoscopes_to_show))\n\n\"\"\"\n if show_horoscopes:\n return f\"Hello there, {name}: {predictions}.\"\n else:\n return f\"Hello there, {name}! Have a nice day!\"\n\"\"\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python2.7
import os, re, urllib2
def main():
ip = raw_input(" Target IP : ")
check(ip)
def check(ip):
try:
print "Loading Check File Uploader...."
print 58*"-"
page = 1
while page <= 21:
bing = "http://www.bing.com/search?q=ip%3A" + \
ip + "+upload&count=50&first=" + str(page)
openbing = urllib2.urlopen(bing)
readbing = openbing.read()
findwebs = re.findall('<h2><a href="(.*?)"', readbing)
sites = findwebs
for i in sites:
try:
response = urllib2.urlopen(i).read()
checksqli(i)
except urllib2.HTTPError, e:
pass  # skip URLs that fail to load
page = page + 10
except:
pass
def checksqli(sqli):
responsetwo = urllib2.urlopen(sqli).read()
find = re.findall('type="file"', responsetwo)
if find:
print(" Found ---> " + sqli)
main()
|
normal
|
{
"blob_id": "21af630bf383ee1bdd0f644283f0ddadde71620a",
"index": 236,
"step-1": "#!/bin/usr/python2.7.x\r\n\r\nimport os, re, urllib2\r\n\r\ndef main():\r\n\tip = raw_input(\" Target IP : \")\r\n\tcheck(ip)\r\n\r\ndef check(ip):\r\n\ttry:\r\n\t\tprint \"Loading Check File Uploader....\"\r\n\t\tprint 58*\"-\"\r\n\t\tpage = 1\r\n\t\twhile page <= 21:\r\n\t\t\tbing = \"http://www.bing.com/search?q=ip%3A\" + \\\r\n\t\t\tip + \"+upload&count=50&first=\" + str(page)\r\n\t\t\topenbing = urllib2.urlopen(bing)\r\n\t\t\treadbing = openbing.read()\r\n\t\t\tfindwebs = re.findall('<h2><a href=\"(.*?)\"', readbing)\r\n\t\t\tsites = findwebs\r\n\t\t\tfor i in sites:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tresponse = urllib2.urlopen(i).read()\r\n\t\t\t\t\tchecksqli(i)\r\n\t\t\t\texcept urllib2.HTTPError, e:\r\n\t\t\t\t\tstr(sites).strip(i)\r\n\t\t\tpage = page + 10\r\n\texcept:\r\n\t\tpass\r\n\r\ndef checksqli(sqli):\r\n responsetwo = urllib2.urlopen(sqli).read()\r\n find = re.findall('type=\"file\"', responsetwo)\r\n if find:\r\n print(\" Found ---> \" + sqli)\r\n\t\t\t\t\t\r\nmain()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
BCXYZ company has up to ___ employees.
The company decides to create a unique identification number (UID) for each of its employees.
The company has assigned you the task of validating all the randomly generated UIDs.
A valid UID must follow the rules below:
It must contain at least 2 uppercase English alphabet characters.
It must contain at least 3 digits.
It should only contain alphanumeric characters
No character should repeat.
There must be exactly 10 characters in a valid UID.
"""
import re
for _ in range(int(input())):
imp = input()
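# ^[a-zA-Z0-9]{10}$ -> exactly 10 alphanumeric characters
# ([A-Z].*){2}      -> at least 2 uppercase letters
# ([0-9].*){3}      -> at least 3 digits
# .*(.).*\1         -> a repeated character, so the match is negated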
if bool(re.search(r'^[a-zA-Z0-9]{10}$', imp)) and bool(re.search(r'([A-Z].*){2}', imp)) and \
bool(re.search(r'([0-9].*){3}', imp)) and not bool(re.search(r'.*(.).*\1', imp)):
print("Valid")
else:
print("Invalid")
|
normal
|
{
"blob_id": "3a5c8ee49c50820cea201c088acca32e018c1501",
"index": 3715,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(int(input())):\n imp = input()\n if bool(re.search('[a-zA-Z0-9]{10}', imp)) and bool(re.search(\n '([A-Z].*){2}', imp)) and bool(re.search('([0-9].*){3}', imp)\n ) and not bool(re.search('.*(.).*\\\\1', imp)):\n print('Valid')\n else:\n print('Invalid')\n",
"step-3": "<mask token>\nimport re\nfor _ in range(int(input())):\n imp = input()\n if bool(re.search('[a-zA-Z0-9]{10}', imp)) and bool(re.search(\n '([A-Z].*){2}', imp)) and bool(re.search('([0-9].*){3}', imp)\n ) and not bool(re.search('.*(.).*\\\\1', imp)):\n print('Valid')\n else:\n print('Invalid')\n",
"step-4": "\"\"\"\nBCXYZ company has up to\n\nemployees.\nThe company decides to create a unique identification number (UID) for each of its employees.\nThe company has assigned you the task of validating all the randomly generated UIDs.\nA valid UID must follow the rules below:\n\nIt must contain at least 2 uppercase English alphabet characters.\nIt must contain at least 3 digits.\nIt should only contain alphanumeric characters\nNo character should repeat.\nThere must be exactly 10 characters in a valid UID.\n\"\"\"\n\nimport re\n\nfor _ in range(int(input())):\n imp = input()\n if bool(re.search(r'[a-zA-Z0-9]{10}', imp)) and bool(re.search(r'([A-Z].*){2}', imp)) and \\\n bool(re.search(r'([0-9].*){3}', imp)) and not bool(re.search(r'.*(.).*\\1', imp)):\n print(\"Valid\")\n else:\n print(\"Invalid\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print(60 * 60)
seconds_per_hour = 60 * 60
print(24 * seconds_per_hour)
seconds_per_day = 24 * seconds_per_hour
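# / is true division and returns a float (24.0); // is floor division and returns an int (24)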
print(seconds_per_day / seconds_per_hour)
print(seconds_per_day // seconds_per_hour)
|
normal
|
{
"blob_id": "358879d83ed3058530031d50fb69e3ce11fbd524",
"index": 1057,
"step-1": "<mask token>\n",
"step-2": "print(60 * 60)\n<mask token>\nprint(24 * seconds_per_hour)\n<mask token>\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"step-3": "print(60 * 60)\nseconds_per_hour = 60 * 60\nprint(24 * seconds_per_hour)\nseconds_per_day = 24 * seconds_per_hour\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from operation import *
import math
class InsertWord(Operation):
@classmethod
def getValue(cls, t, h, w, b, arg=None):
return math.log(arg["prob"]) * w[cls]
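# getKBest scores every (parent node, word) insertion candidate with getValue
# and keeps the k highest-scoring ones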
@classmethod
def getKBest(cls, t, h, args, w, b, k):
valueList = []
for index in args["parenNodeIndexList"]:
for word, prob in args["vocabDic"].items():
arg = {"parentNodeIndex": index, "word": word, "prob": prob}
valueList.append([cls.getValue(t, h, w, b, arg), arg])
valueList = sorted(valueList, key=lambda v: v[0], reverse=True)[:k]
return valueList
@classmethod
def transFormTree(cls, h, arg=None):
parentNodeIndex = arg["parentNodeIndex"]
word = arg["word"]
h[0].insert_node(word, parentNodeIndex)
@classmethod
def transFormT_H(cls, t_h, arg=None):
parentNodeIndex = arg["parentNodeIndex"]
word = arg["word"]
t_h.hs.append([t_h.hs[-1][0].insert_node(word, parentNodeIndex)])
|
normal
|
{
"blob_id": "355e3932c8bd9105e0c1ce9259e3b7416997523c",
"index": 3668,
"step-1": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n <mask token>\n <mask token>\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n",
"step-2": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n <mask token>\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n",
"step-3": "<mask token>\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h, args, w, b, k):\n valueList = []\n for index in args['parenNodeIndexList']:\n for word, prob in args['vocabDic'].items():\n arg['parentNodeIndex'] = index\n arg['word'] = word\n arg['prob'] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k - 1]\n return valueList\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n",
"step-4": "from operation import *\nimport math\n\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg['prob']) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h, args, w, b, k):\n valueList = []\n for index in args['parenNodeIndexList']:\n for word, prob in args['vocabDic'].items():\n arg['parentNodeIndex'] = index\n arg['word'] = word\n arg['prob'] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k - 1]\n return valueList\n\n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg['parentNodeIndex']\n word = arg['word']\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg['parentNodeInex']\n word = arg['word']\n t_h.hs.appens([t_h.hs[len(t_h.hs) - 1][0].insert_node(word,\n parentNodeIndex)])\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom operation import *\nimport math\n\nclass InsertWord(Operation):\n\n @classmethod\n def getValue(cls, t, h, w, b, arg=None):\n return math.log(arg[\"prob\"]) * w[cls]\n\n @classmethod\n def getKBest(cls, t, h , args, w, b, k):\n valueList = []\n for index in args[\"parenNodeIndexList\"]:\n for word, prob in args[\"vocabDic\"].items():\n arg[\"parentNodeIndex\"] = index\n arg[\"word\"] = word\n arg[\"prob\"] = prob\n valueList.append([self.getValue(t, h, w, b, arg), arg])\n valueList = sorted(ValueList, reverse=True)[:k-1]\n return valueList\n \n @classmethod\n def transFormTree(cls, h, arg=None):\n parentNodeIndex = arg[\"parentNodeIndex\"]\n word = arg[\"word\"]\n h[0].insert_node(word, parentNodeIndex)\n\n @classmethod\n def transFormT_H(cls, t_h, arg=None):\n parentNodeIndex = arg[\"parentNodeInex\"]\n word = arg[\"word\"]\n t_h.hs.appens( [t_h.hs[len(t_h.hs) -1][0].insert_node(word, parentNodeIndex), ] )\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import functools
import shutil
import tempfile
import unittest
import unittest.mock
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.io.extractors import training_trials, biased_trials, camera
from ibllib.io import raw_data_loaders as raw
from ibllib.io.extractors.base import BaseExtractor
def wheelMoves_fixture(func):
"""Decorator to save some dummy wheelMoves ALF files for extraction tests"""
@functools.wraps(func)
def wrapper(obj=None):
# Save some wheelMoves ALF files
attr_list = ['training_lt5',
'training_ge5',
'biased_lt5',
'biased_ge5']
alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]
n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]
for p, n in zip(alf_paths, n_trials):
p.mkdir()
np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))
np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))
# Run method
func(obj)
# Teardown; delete the files
for p in alf_paths:
shutil.rmtree(p)
return wrapper
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))
# turn off logging for unit testing as we will purposely go into warning/error cases
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
# Save some dummy wheel moves data for trial firstMovement_times extraction
def test_get_feedbackType(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackType(
self.training_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.training_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = training_trials.FeedbackType(
self.training_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.training_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# BIASED SESSIONS
ft = biased_trials.FeedbackType(
self.biased_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = biased_trials.FeedbackType(
self.biased_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
def test_get_contrastLR(self):
# TRAINING SESSIONS
cl, cr = training_trials.ContrastLR(
self.training_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = training_trials.ContrastLR(
self.training_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# BIASED SESSIONS
cl, cr = biased_trials.ContrastLR(
self.biased_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = biased_trials.ContrastLR(
self.biased_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
def test_get_probabilityLeft(self):
# TRAINING SESSIONS
pl = training_trials.ProbabilityLeft(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# -- version >= 5.0.0
pl = training_trials.ProbabilityLeft(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# BIASED SESSIONS
pl = biased_trials.ProbabilityLeft(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# Test if only probs that are in prob set
md = raw.load_settings(self.biased_lt5['path'])
if md:
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
# -- version >= 5.0.0
pl = biased_trials.ProbabilityLeft(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# Test if only probs that are in prob set
md = raw.load_settings(self.biased_ge5['path'])
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
def test_get_choice(self):
# TRAINING SESSIONS
choice = training_trials.Choice(
session_path=self.training_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = training_trials.Choice(
session_path=self.training_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# BIASED SESSIONS
choice = biased_trials.Choice(
session_path=self.biased_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = biased_trials.Choice(
session_path=self.biased_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
# TODO: Test its sawtooth
# TRAINING SESSIONS
rn = training_trials.RepNum(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
# -- version >= 5.0.0
rn = training_trials.RepNum(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
# BIASED SESSIONS have no repeated trials
def test_get_rewardVolume(self):
# TRAINING SESSIONS
rv = training_trials.RewardVolume(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# -- version >= 5.0.0
rv = training_trials.RewardVolume(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# BIASED SESSIONS
rv = biased_trials.RewardVolume(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
# -- version >= 5.0.0
rv = biased_trials.RewardVolume(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
# TRAINING SESSIONS
sott = training_trials.StimOnTriggerTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = training_trials.StimOnTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# BIASED SESSIONS
sott = biased_trials.StimOnTriggerTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = biased_trials.StimOnTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
def test_get_stimOn_times_lt5(self):
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_get_stimOn_times_ge5(self):
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_stimOnOffFreeze_times(self):
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
def test_get_intervals(self):
# TRAINING SESSIONS
di = training_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = training_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# BIASED SESSIONS
di = biased_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = biased_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
# TRAINING SESSIONS
rt = training_trials.ResponseTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = training_trials.ResponseTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# BIASED SESSIONS
rt = biased_trials.ResponseTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = biased_trials.ResponseTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
def test_get_goCueTrigger_times(self):
# TRAINING SESSIONS
data = raw.load_data(self.training_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = training_trials.GoCueTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
# BIASED SESSIONS
data = raw.load_data(self.biased_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = biased_trials.GoCueTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
def test_get_goCueOnset_times(self):
# TRAINING SESSIONS
gcot = training_trials.GoCueTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
# -- version >= 5.0.0
gcot = training_trials.GoCueTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
# BIASED SESSIONS
gcot = biased_trials.GoCueTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
# -- version >= 5.0.0
gcot = biased_trials.GoCueTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
def test_get_included_trials_lt5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials_ge5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
# TRAINING SESSIONS
# Expect an error raised because no wheel moves were present in test data
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(
self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))
# -- version >= 5.0.0
out, files = training_trials.extract_all(self.training_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
# BIASED SESSIONS
# The new trials extractor additionally extracts the wheel data and this fails for the < 5.0
# test data so we will stub the wheel extractor
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = ({}, [])
out, files = biased_trials.extract_all(
self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
# -- version >= 5.0.0
out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
# TRAINING SESSIONS
# only for training?
path = self.training_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
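# raw encoder timestamps recorded after the Bpod clock reset; the loader is
# expected to offset them by 2 ** 32 so that re_ts stays monotonic (checked below)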
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
# here we test for 2 kinds of file corruption that happen
# 1/2 the first sample time is corrupt and absurdly high and should be discarded
# 2/2 two samples are swapped and need to be swapped back
path = self.biased_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
# -- version >= 5.0.0
path = self.biased_ge5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
# the wheel folder contains other errors in bpod output that had to be addressed
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
def test_load_encoder_events(self):
raw.load_encoder_events(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.training_ge5['path'])
raw.load_encoder_events(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.biased_ge5['path'])
def test_size_outputs(self):
# check the output dimensions
# VERSION >= 5.0.0
from ibllib.io.extractors.bpod_trials import extract_all
extract_all(self.training_ge5['path'])
trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
extract_all(self.biased_ge5['path'])
trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# VERSION < 5.0.0
# for these test data there are no wheel moves so let's mock the output
mock_data = {
'intervals': np.array([[0, 1], ]),
'peakAmplitude': np.array([1, 1]),
'peakVelocity_times': np.array([1, 1])}
function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'
# Training
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.training_lt5['path'])
trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# Biased
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.biased_lt5['path'])
trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
def tearDown(self):
for f in self.main_path.rglob('_ibl_log.*.log'):
f.unlink()
[x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]
[x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 11 ppm drift
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + .001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
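# t1 is t0 passed through a linear clock model (11 ppm drift, 20 s offset);
# every sync_trials_robust call below should recover that same mapping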
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) -> None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = (
"some_file.csv",
"some_file.tsv",
"some_file.ssv",
"some_file.npy",
)
var_names = (
"csv",
"ssv",
"tsv",
"npy",
)
def _extract(self, **kwargs) -> tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return (csv, ssv, tsv, npy)
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) -> None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
# self.addClassCleanup(tempdir.cleanup) # py3.8
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
# UNIT DATA
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
# Add drift
ts += np.full_like(ts, 1e-4).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
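# build two matching square-pulse trains: GPIO as frame indices into the camera
# timestamps, audio as absolute times, so groom_pin_state can align them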
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.
audio = {'times': np.empty(n_pulses * 2),
'polarities': gpio['polarities']}
for p in range(n_pulses):
i = p * 2
rise = (pulse_width * p) + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
self.assertEqual(audio, audio_, 'Audio dict shouldn\'t be affected')
np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])
# Broken TTLs + extra TTL
delay = 0.08
pulse_width = 1e-5
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
# One front shifted by a large amount
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))
def test_attribute_times(self, display=False):
# Create two timestamp arrays at two different frequencies
tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm
tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm
tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front
tsb = np.sort(np.append(tsb, .41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
# Check with default args
matches = camera.attribute_times(tsa, tsb)
expected = np.array(
[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,
22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,
45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
# Taking closest instead of first should change index of ambiguous front
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
# Taking first after should exclude many pulses
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,
22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
# Lower tolerance
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
# Remove injective assert
matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')
expected = np.array(
[0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,
24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,
46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
# Check input validation
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
|
normal
|
{
"blob_id": "f17d33f1d035da42dc9a2b4c0c60beefc6a48dea",
"index": 64,
"step-1": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n <mask token>\n <mask token>\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = 
biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <mask token>\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = 
t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be 
effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n 
self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n 
self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n 
path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n 
self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 
22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
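test_encoder_positions_clock_reset in the step above expects the lt5 position loader to keep re_ts strictly increasing by adding 2 ** 32 to every sample recorded after the 32-bit encoder clock wraps. A minimal sketch of that unwrapping, assuming numpy only; unwrap_uint32 is a hypothetical stand-in, since the real handling lives inside raw._load_encoder_positions_file_lt5.

import numpy as np

def unwrap_uint32(ts):
    # Count the backwards jumps seen so far and add one full 2**32 span
    # per wrap, so the unwrapped timestamps are strictly increasing.
    ts = np.asarray(ts, dtype=np.int64)
    wraps = np.cumsum(np.r_[0, np.diff(ts) < 0])
    return ts + wraps * 2 ** 32

raw_ts = np.array([4294966000, 4294967290, 150, 900])  # clock wraps after sample 2
fixed = unwrap_uint32(raw_ts)
assert np.all(np.diff(fixed) > 0) and np.all(fixed[2:] - 2 ** 32 == raw_ts[2:])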
"step-3": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = 
training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = 
training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n 
self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n 
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be affected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, 
audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
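test_attribute_times in the step above pins down camera.attribute_times for its default mode plus take='nearest', take='after', a tol window, and non-injective matching, with take='closest' raising ValueError. The sketch below shows only the basic nearest-neighbour primitive built on np.searchsorted, assuming numpy; nearest_indices is a hypothetical helper and does not reproduce the tolerance or one-to-one behaviour, which is why the test's expected arrays differ between modes.

import numpy as np

def nearest_indices(tsa, tsb):
    # For each timestamp in tsb, return the index of the closest
    # timestamp in the sorted array tsa.
    right = np.searchsorted(tsa, tsb)            # first tsa >= each tsb
    left = np.clip(right - 1, 0, None)
    right = np.clip(right, None, tsa.size - 1)
    pick_right = np.abs(tsa[right] - tsb) < np.abs(tsb - tsa[left])
    return np.where(pick_right, right, left)

tsa = np.linspace(0, 60, 60 * 4)[:60]            # same ~4 Hz train as the test
tsb = np.linspace(0, 60, 60 * 3)[:45]            # same ~3 Hz train as the test
idx = nearest_indices(tsa, tsb)
assert idx.size == tsb.size and np.all(np.diff(idx) >= 0)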
"step-4": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = 
raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n 
self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n 
self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 
1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in 
self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = 
np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-5": "import functools\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport one.alf.io as alfio\nfrom ibllib.io.extractors import training_trials, biased_trials, camera\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.extractors.base import BaseExtractor\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n @functools.wraps(func)\n def wrapper(obj=None):\n # Save some wheelMoves ALF files\n attr_list = ['training_lt5',\n 'training_ge5',\n 'biased_lt5',\n 'biased_ge5']\n alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n\n # Run method\n func(obj)\n\n # Teardown; delete the files\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))\n # turn off logging for unit testing as we will purposedly go into warning/error cases\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n # Save some dummy wheel moves data for trial firstMovement_times extraction\n\n def test_get_feedbackType(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackType(\n self.training_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = training_trials.FeedbackType(\n self.training_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackType(\n self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = biased_trials.FeedbackType(\n self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n # TRAINING SESSIONS\n cl, cr = training_trials.ContrastLR(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = 
training_trials.ContrastLR(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n # BIASED SESSIONS\n cl, cr = biased_trials.ContrastLR(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = biased_trials.ContrastLR(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n # TRAINING SESSIONS\n pl = training_trials.ProbabilityLeft(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # -- version >= 5.0.0\n pl = training_trials.ProbabilityLeft(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n\n # BIASED SESSIONS\n pl = biased_trials.ProbabilityLeft(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n # -- version >= 5.0.0\n pl = biased_trials.ProbabilityLeft(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n\n def test_get_choice(self):\n # TRAINING SESSIONS\n choice = training_trials.Choice(\n session_path=self.training_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = training_trials.Choice(\n session_path=self.training_ge5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n # BIASED SESSIONS\n choice = biased_trials.Choice(\n session_path=self.biased_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = biased_trials.Choice(\n session_path=self.biased_ge5['path']).extract(save=False)[0]\n 
self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n # TODO: Test its sawtooth\n # TRAINING SESSIONS\n rn = training_trials.RepNum(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n # -- version >= 5.0.0\n rn = training_trials.RepNum(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n # BIASED SESSIONS have no repeted trials\n\n def test_get_rewardVolume(self):\n # TRAINING SESSIONS\n rv = training_trials.RewardVolume(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # -- version >= 5.0.0\n rv = training_trials.RewardVolume(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n\n # BIASED SESSIONS\n rv = biased_trials.RewardVolume(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n # -- version >= 5.0.0\n rv = biased_trials.RewardVolume(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n # TRAINING SESSIONS\n sott = training_trials.StimOnTriggerTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = training_trials.StimOnTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # BIASED SESSIONS\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnTimes_deprecated(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = 
biased_trials.StimOnTimes_deprecated(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n # TRAINING SESSIONS\n di = training_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = training_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n # BIASED SESSIONS\n di = biased_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = biased_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n # TRAINING SESSIONS\n rt = training_trials.ResponseTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = training_trials.ResponseTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n # BIASED SESSIONS\n rt = biased_trials.ResponseTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = biased_trials.ResponseTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n # TRAINING SESSIONS\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = training_trials.GoCueTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n # BIASED SESSIONS\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = biased_trials.GoCueTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n # TRAINING SESSIONS\n gcot = training_trials.GoCueTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = training_trials.GoCueTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n\n # BIASED SESSIONS\n gcot = 
biased_trials.GoCueTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = biased_trials.GoCueTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n # TRAINING SESSIONS\n # Expect an error raised because no wheel moves were present in test data\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(\n self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))\n # -- version >= 5.0.0\n out, files = training_trials.extract_all(self.training_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n # BIASED SESSIONS\n # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0\n # test data so we will stub the wheel extractor\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = ({}, [])\n out, files = biased_trials.extract_all(\n self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n # -- version >= 5.0.0\n out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n # TRAINING SESSIONS\n # only for training?\n path = self.training_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n 
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n # here we test for 2 kinds of file corruption that happen\n # 1/2 the first sample time is corrupt and absurdly high and should be discarded\n # 2/2 2 samples are swapped and need to be swapped backk\n path = self.biased_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n # -- version >= 5.0.0\n path = self.biased_ge5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n # the wheel folder contains other errors in bpod output that had to be addressed\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n # check the output dimensions\n # VERSION >= 5.0.0\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # VERSION < 5.0.0\n # for these test data there are no wheel moves so let's mock the output\n mock_data = {\n 'intervals': np.array([[0, 1], ]),\n 'peakAmplitude': np.array([1, 1]),\n 'peakVelocity_times': np.array([1, 1])}\n function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'\n # Training\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # Biased\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = 
alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-6, -20]) # bpod starts 20 secs before with 10 ppm drift\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + .001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) -> None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = (\n \"some_file.csv\",\n \"some_file.tsv\",\n \"some_file.ssv\",\n \"some_file.npy\",\n )\n var_names = (\n \"csv\",\n \"ssv\",\n \"tsv\",\n \"npy\",\n )\n\n def _extract(self, **kwargs) -> tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n\n return (csv, ssv, tsv, npy)\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n def 
setUp(self) -> None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n # self.addClassCleanup(tempdir.cleanup) # py3.8\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n def test_groom_pin_state(self):\n # UNIT DATA\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n # Add drift\n ts += np.full_like(ts, 1e-4).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.\n audio = {'times': np.empty(n_pulses * 2),\n 'polarities': gpio['polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = (pulse_width * p) + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, 'Audio dict shouldn\\'t be effected')\n np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])\n\n # Broken TTLs + extra TTL\n delay = 0.08\n pulse_width = 1e-5\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n\n # One front shifted by a large amount\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))\n\n def test_attribute_times(self, display=False):\n # Create two timestamp arrays at two different frequencies\n tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm\n tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm\n tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front\n tsb = np.sort(np.append(tsb, .41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n\n # Check with default args\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array(\n [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,\n 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,\n 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n\n # Taking closest instead of first should change index of ambiguous front\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n\n # Taking first after should exclude many pulses\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,\n 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n 
expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n\n # Lower tolerance\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n\n # Remove injective assert\n matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')\n expected = np.array(\n [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,\n 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,\n 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n\n # Check input validation\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False, verbosity=2)\n",
"step-ids": [
27,
34,
37,
45,
49
]
}
|
[
27,
34,
37,
45,
49
] |
import dnf_converter
def parse(query):
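	"""Split a DNF query into (product, clauses) pairs plus a flat clause list."""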
print("parsing the query...")
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query["$or"]:
clauses = []
if "$and" in cp:
for clause in cp["$and"]:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({ "cp": cp, "clauses": clauses })
return cp_clause_list, clause_list
|
normal
|
{
"blob_id": "999de0965efa3c1fe021142a105dcf28184cd5ba",
"index": 43,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-3": "import dnf_converter\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-4": "import dnf_converter\n\ndef parse(query):\n\tprint(\"parsing the query...\")\n\tquery = dnf_converter.convert(query)\n\tcp_clause_list = []\n\tclause_list = []\n\tfor cp in query[\"$or\"]:\n\t\tclauses = []\n\t\tif \"$and\" in cp:\n\t\t\tfor clause in cp[\"$and\"]:\n\t\t\t\tclauses.append(clause)\n\t\t\t\tclause_list.append(clause)\n\t\telse:\n\t\t\tclause = cp\n\t\t\tclauses.append(clause)\n\t\t\tclause_list.append(clause)\n\t\tcp_clause_list.append({ \"cp\": cp, \"clauses\": clauses })\n\treturn cp_clause_list, clause_list",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from psycopg2 import ProgrammingError, IntegrityError
import datetime
from loguru import logger
from db.connect import open_cursor, open_connection
_log_file_name = __file__.split("/")[-1].split(".")[0]
logger.add(f"logs/{_log_file_name}.log", rotation="1 day")
class DataTypeSaveError(Exception):
pass
class TypeValidationError(Exception):
pass
class MultipleRowsError(Exception):
pass
class DoesNotExist(Exception):
pass
type_map = {str: "%s", int: "%d", float: "%f"}  # currently unused; note psycopg2 expects %s for all parameter types
class BaseDataClass:
def _create_insert_query(self):
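        """Build a parameterized INSERT ... RETURNING id statement from the instance's public fields."""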
column_names = ""
row_values = ""
values = []
for column_name, row_value in self.__dict__.items():
if column_name.startswith("_"):
continue
if column_name == "id" and row_value is None:
# If id is None, leave it to the db to deal with incrementing the pk.
continue
column_names += str(column_name) + ", "
row_values += "%s, "
values.append(row_value)
columns = "(" + column_names[:-2] + ")"
values_reprs = "(" + row_values[:-2] + ")"
query = f"INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;"
return query, values
@classmethod
def _create_select_query(cls, **kwargs):
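        """Build a SELECT query from keyword filters, skipping None values.

        Note: filter values are interpolated directly into the SQL string,
        so callers must only pass trusted input.
        """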
key_value_pairs = ""
for key, value in kwargs.items():
if value is None:
continue
key_value_pairs += f"{key} = '{value}' AND "
key_value_pairs = key_value_pairs[:-5]
query = f"SELECT * FROM {cls._table_name} WHERE {key_value_pairs};"
return query
def save(self, commit=True, with_get=True):
"""Store conent to database.
This should be thread safe by using asyncio's Lock in open_cursor.
"""
self.validate()
logger.debug(f"Save: {self}")
query, values = self._create_insert_query()
with open_connection() as conn:
with open_cursor(conn) as cursor:
try:
cursor.execute(query, tuple(values))
if with_get:
_id = cursor.fetchone()[0]
logger.debug(f"Saved value with id: {_id}")
self.id = _id or self.id
if not self.id:
logger.warning(f"Returned with an empty id. {self}")
if commit:
conn.commit()
except ProgrammingError as e:
logger.error(e)
raise DataTypeSaveError
except IntegrityError as e:
logger.warning(f"Could not save: {self}")
logger.error(e)
return self
def clean(self):
logger.debug(f"Cleaning: {self}")
def validate(self):
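        """Coerce annotated fields to their declared types, raising TypeValidationError on failure."""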
annotations = self.__annotations__
keys_ = annotations.keys()
fields = self.__dict__
for key in keys_:
if not isinstance(fields[key], annotations[key]):
if key == "id" and fields[key] is None:
# Pass None to id and allow the DB to increment it.
continue
if key in self._ignore_fields:
continue
try:
self.__dict__[key] = annotations[key](fields[key])
except (TypeError, ValueError) as e:
logger.error(
f"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}."
)
logger.error(e)
raise TypeValidationError(
f"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}."
)
@classmethod
def prepare(cls, *args):
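        """Default passthrough; subclasses may override to transform raw row values."""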
return args
@classmethod
def create(cls, with_get=False, **kwargs):
inst = cls(**kwargs)
inst.clean()
inst.save(with_get=with_get)
return inst
@classmethod
def _get_rows(cls, **kwargs):
logger.debug(f"{cls}._get_rows")
query = cls._create_select_query(**kwargs)
with open_connection() as conn:
with open_cursor(conn) as cursor:
cursor.execute(query)
rows = cursor.fetchall()
return rows
@classmethod
def all(cls, **kwargs):
logger.debug(f"Get all: {cls}")
rows = cls._get_rows(**kwargs)
instances = []
for row in rows:
instances.append(cls(*row))
return instances
@classmethod
def get(cls, **kwargs):
logger.debug(f"Get: {cls}")
rows = cls._get_rows(**kwargs)
logger.debug(f"Rows: {rows}")
if not rows:
raise DoesNotExist(f"{cls}({kwargs}")
if len(rows) > 1:
raise MultipleRowsError(f"Got {len(rows)} entries in {cls}.get()")
if isinstance(rows, list):
row = rows[0]
else:
row = rows
return cls(*row)
def get_id(self):
logger.debug(f"Get own id: {self}.")
return self.__class__.get(**self.__dict__).id
|
normal
|
{
"blob_id": "8339ac512d851ea20938a1fbeedcb751cb2b8a6a",
"index": 4337,
"step-1": "<mask token>\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. {self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. {self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-3": "<mask token>\n_log_file_name = __file__.split('/')[-1].split('.')[0]\nlogger.add(f'logs/{_log_file_name}.log', rotation='1 day')\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: '%s', int: '%d', float: '%f'}\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-4": "from psycopg2 import ProgrammingError, IntegrityError\nimport datetime\nfrom loguru import logger\nfrom db.connect import open_cursor, open_connection\n_log_file_name = __file__.split('/')[-1].split('.')[0]\nlogger.add(f'logs/{_log_file_name}.log', rotation='1 day')\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: '%s', int: '%d', float: '%f'}\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-5": "from psycopg2 import ProgrammingError, IntegrityError\nimport datetime\nfrom loguru import logger\n\nfrom db.connect import open_cursor, open_connection\n\n\n_log_file_name = __file__.split(\"/\")[-1].split(\".\")[0]\nlogger.add(f\"logs/{_log_file_name}.log\", rotation=\"1 day\")\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: \"%s\", int: \"%d\", float: \"%f\"}\n\n\nclass BaseDataClass:\n def _create_insert_query(self):\n\n column_names = \"\"\n row_values = \"\"\n values = []\n for column_name, row_value in self.__dict__.items():\n\n if column_name.startswith(\"_\"):\n continue\n\n if column_name == \"id\" and row_value is None:\n # If id is None, leave it to the db to deal with incrementing the pk.\n continue\n\n column_names += str(column_name) + \", \"\n row_values += \"%s, \"\n values.append(row_value)\n\n columns = \"(\" + column_names[:-2] + \")\"\n values_reprs = \"(\" + row_values[:-2] + \")\"\n\n query = f\"INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;\"\n\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n\n key_value_pairs = \"\"\n for key, value in kwargs.items():\n\n if value is None:\n continue\n\n key_value_pairs += f\"{key} = '{value}' AND \"\n\n key_value_pairs = key_value_pairs[:-5]\n\n query = f\"SELECT * FROM {cls._table_name} WHERE {key_value_pairs};\"\n\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f\"Save: {self}\")\n query, values = self._create_insert_query()\n\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f\"Saved value with id: {_id}\")\n self.id = _id or self.id\n if not self.id:\n logger.warning(f\"Returned with an empty id. 
{self}\")\n if commit:\n conn.commit()\n\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f\"Could not save: {self}\")\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f\"Cleaning: {self}\")\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == \"id\" and fields[key] is None:\n # Pass None to id and allow the DB to increment it.\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f\"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.\"\n )\n logger.error(e)\n raise TypeValidationError(\n f\"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.\"\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f\"{cls}._get_rows\")\n query = cls._create_select_query(**kwargs)\n\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f\"Get all: {cls}\")\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f\"Get: {cls}\")\n rows = cls._get_rows(**kwargs)\n logger.debug(f\"Rows: {rows}\")\n\n if not rows:\n raise DoesNotExist(f\"{cls}({kwargs}\")\n\n if len(rows) > 1:\n raise MultipleRowsError(f\"Got {len(rows)} entries in {cls}.get()\")\n\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n\n return cls(*row)\n\n def get_id(self):\n logger.debug(f\"Get own id: {self}.\")\n return self.__class__.get(**self.__dict__).id\n",
"step-ids": [
6,
12,
18,
19,
20
]
}
|
[
6,
12,
18,
19,
20
] |
'''
A prime number is a natural number greater than 1 that has no positive divisors other than 1 and itself. Given two integers A and B,
print the number of primes between them, inclusively.
'''
a = int(input())
b = int(input())
count = 0
for i in range(a, b+1):
    if i < 2:
        continue  # 0, 1, and negatives are not prime
    true_prime = True
    for num in range(2, i):
        if i % num == 0:
            true_prime = False
    if true_prime:
        count += 1
print(count)
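# A possible refinement (a sketch, not part of the original solution):
# trial division only needs to test divisors up to sqrt(i), e.g.
#     for num in range(2, int(i ** 0.5) + 1):
#         if i % num == 0:
#             true_prime = False
#             break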
|
normal
|
{
"blob_id": "ed4c97913a9dba5cf6be56050a8d2ce24dbd6033",
"index": 1870,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(a, b + 1):\n true_prime = True\n for num in range(2, i):\n if i % num == 0:\n true_prime = False\n if true_prime:\n count += 1\nprint(count)\n",
"step-3": "<mask token>\na = int(input())\nb = int(input())\ncount = 0\nfor i in range(a, b + 1):\n true_prime = True\n for num in range(2, i):\n if i % num == 0:\n true_prime = False\n if true_prime:\n count += 1\nprint(count)\n",
"step-4": "'''\nA prime number is a natural number greater than 1 that has no positive divisors other than 1 and itself. Given two integers A and B, \nprint the number of primes between them, inclusively.\n'''\n\na = int(input())\nb = int(input())\ncount = 0\nfor i in range(a, b+1):\n true_prime = True\n \n for num in range(2, i):\n if i % num == 0:\n true_prime = False\n if true_prime:\n count += 1\n\nprint(count)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
# from django.contrib.admin import AdminSite
# class MyAdminSite(AdminSite):
# site_header = 'Finder Administration'
# admin_site = MyAdminSite(name='Finder Admin')
from finder.models import Database, Column, GpsData, Alarm, System
class ColumnInline(admin.TabularInline):
model = Column
class GPSInline(admin.TabularInline):
model = GpsData
classes= ('collapse',)
class DatabaseAdmin(admin.ModelAdmin):
# fieldsets = [
# (None, {'fields': ['database_id']}),
# ('Database Info', {#'classes': ('collapse',),
# 'fields': ['rows',
# 'missing_rows',
# 'columns_count',
# 'columns_geo_count',
# 'columns_numeric_count',
# 'columns_temporal_count',
# 'columns_text_count',
# 'values',
# 'values_missing']}
# ),
# ('Profiler Info', {#'classes': ('collapse',),
# 'fields': ['profiler_input_file',
# 'profiler_status',
# 'profiler_time_begin',
# 'profiler_time_end',
# 'socrata_author',
# 'socrata_download_count',
# 'socrata_view_count']}
# ),
# ('Socrata Metadata', {#'classes': ('collapse',),
# 'fields': ['socrata_status',
# 'socrata_description',
# 'socrata_category',
# 'socrata_owner',
# 'socrata_author',
# 'socrata_download_count',
# 'socrata_view_count']}
# ),
# ('GPS Data', {#'classes': ('collapse',),
# 'fields': [ 'gps_values', 'lat_min', 'lat_max', 'long_min', 'long_max']}
# ),
# ]
list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status',
#'socrata_primary', 'rows', 'columns_count', 'missing_percent',
'source_agency',
'has_bounding_box')
search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)
list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']
prepopulated_fields = {'name': ('database_id',)}
inlines = [ColumnInline
#, GPSInline
]
admin.site.register(Database, DatabaseAdmin)
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
admin.site.register(Alarm, AlarmAdmin)
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
admin.site.register(System, SystemAdmin)
|
normal
|
{
"blob_id": "e1968e0d6146ce7656505eeed8e9f31daa4b558a",
"index": 5447,
"step-1": "<mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GPSInline(admin.TabularInline):\n <mask token>\n <mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\nadmin.site.register(Database, DatabaseAdmin)\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\nadmin.site.register(Alarm, AlarmAdmin)\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\nadmin.site.register(System, SystemAdmin)\n",
"step-5": "from django.contrib import admin\n\n# from django.contrib.admin import AdminSite\n# class MyAdminSite(AdminSite):\n# site_header = 'Finder Administration'\n# admin_site = MyAdminSite(name='Finder Admin')\n\n\nfrom finder.models import Database, Column, GpsData, Alarm, System\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes= ('collapse',)\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n # fieldsets = [\n # \t\t\t\t(None, {'fields': ['database_id']}),\n # \t\t\t\t('Database Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['rows',\n # \t\t\t\t\t\t\t\t\t\t\t 'missing_rows', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_geo_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_numeric_count', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_temporal_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_text_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'values',\n # \t\t\t\t\t\t\t\t\t\t\t 'values_missing']}\n # \t\t\t),\n # \t\t\t('Profiler Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['profiler_input_file',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_status', \n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_begin',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_end',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('Socrata Metadata', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['socrata_status',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_description', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_category',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_owner',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('GPS Data', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': [ 'gps_values', 'lat_min', 'lat_max', 'long_min', 'long_max']}\n # \t\t\t),\n # \t\t\t]\n\n list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status', \n #'socrata_primary', 'rows', 'columns_count', 'missing_percent', \n 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)\n list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']\n\n prepopulated_fields = {'name': ('database_id',)}\n\n inlines = [ColumnInline\n #, GPSInline\n ]\n \nadmin.site.register(Database, DatabaseAdmin)\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\nadmin.site.register(Alarm, AlarmAdmin)\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\nadmin.site.register(System, SystemAdmin)\n\n",
"step-ids": [
5,
7,
10,
11,
13
]
}
|
[
5,
7,
10,
11,
13
] |
import os, subprocess, time
from os.path import isfile, join
import shutil # to move files from af folder to another
import math
def GetListFile(PathFile, FileExtension):
return [os.path.splitext(f)[0] for f in os.listdir(PathFile) if isfile(join(PathFile, f)) and os.path.splitext(f)[1] == '.' + FileExtension]
def openfile(Path):
fileIn = open(Path, "r")
lines = fileIn.readlines()
fileIn.close()
return lines
import re
def parse_Ct_Structure(Path):
    lines = openfile(Path)
    # Column 5 of a CT file is the pairing partner (0 when unpaired).
    # Collapse runs of whitespace first: re.sub( '\s+', ' ', s ).strip()
    return [int(re.sub( '\s+', ' ', elem ).strip().split(' ')[4]) for elem in lines[1:]]
def parse_SHAPE(Path):
    lines = openfile(Path)
    lista = []
    # The third whitespace-separated column holds the reactivity; -10 is
    # used as the "no data" sentinel when it is missing or exactly zero.
    Intermediate = [re.sub( '\s+', '\t', elem ).strip().split('\t') for elem in lines]
    for elem in Intermediate:
        if len(elem) > 2 and float(elem[2]) != 0:
            lista.append(float(elem[2]))
        else:
            lista.append(-10)
    return lista
def Pairing_status(structure): # dot-bracket string: '('/')' paired, '.' unpaired, 'x' pseudoknot
    status = []
    for value in structure:
        if value == '(' or value == ')':
            status.append('P')
        if value == '.':
            status.append('Un')
        if value == 'x':
            status.append('PK')
    return status
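# e.g. Pairing_status("((..x))") -> ['P', 'P', 'Un', 'Un', 'PK', 'P', 'P']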
def plot2D(x,y,titre):
import matplotlib.pyplot as plt
import numpy as np
plt.plot(x,y,'r.')
plt.xlabel('shape')
plt.ylabel('unpaired probability')
plt.title(titre)
# fitting functions
plt.show()
def distribution_plots(Y,titre):
# plot the x distribution
import matplotlib.pyplot as plt
import numpy as np
'''
d = {x: Y.count(x) for x in Y}
print d
plt.plot(d.keys(),d.values(),'.')
'''
plt.hist(Y, bins=10, color='green')
plt.ylabel('Frequencies')
plt.title(titre)
# fitting functions
#plt.show()
plt.savefig(titre)
def GetbasepairsProb(path_Fasta, fil, FileExtensionFasta,Path_dot_plot):
listProbaPairs=[]
SingleProba=[]
FastaPath=os.path.join(path_Fasta, fil + '.' + FileExtensionFasta)
rna =openfile(FastaPath)[1]
#print rna
os.system("RNAfold -p -d2 --noLP <" + FastaPath+ ">output.txt")
PSPath=os.path.join( Path_dot_plot,fil+"_dp.ps")
shutil.move(fil+"_dp.ps",PSPath)
os.remove(fil+"_ss.ps")
#print fil,'rr'
bpm = loadDotPlotPS(PSPath)
dp = DotPlot(rna, bpm)
for i in range(len(rna)):
for j in range(i, len(rna)):
if dp.getBPProb(i, j) > 0:# get only non null probabilities
listProbaPairs.append((i,j,dp.getBPProb(i, j)))
SingleProba=dp.getUnpairedProbs()
return listProbaPairs, SingleProba
#!!!!!!!!!!!!!!!!!!!!!!!!!!!! loadDotPlotPS(path)
def loadDotPlotPS(path):
res = {}
outTmp = open(path)
for l in outTmp:
data = l[:-1].split()
if len(data) == 4 and data[3]=="ubox":
i = int(data[0])-1
j = int(data[1])-1
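            # RNAfold "ubox" entries store sqrt(probability); square to recover p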
p = math.pow(float(data[2]),2.)
res[i,j] = p
outTmp.close()
return res
def parse_rdat(Path):
    lines = openfile(Path)
    RNA = []
    seq = dict()
    struct = dict()
    reactivity = dict()
    for line in lines:
        #print line.split('\t')[0]
        if line.split('\t')[0] == 'NAME':
            RNA.append(line.split('\t')[1][:-1])
            Id = line.split('\t')[1][:-1]
            #print 'id',Id
        if line.split('\t')[0] == 'SEQUENCE':
            seq[Id] = line.split('\t')[1][:-2]
        if line.split('\t')[0] == 'STRUCTURE':
            struct[Id] = line.split('\t')[1][:-2]
        if line.split('\t')[0] == 'REACTIVITY:1':
            #print line.split('\t')[1:-1]
            reactivity[Id] = line.split('\t')[1:-1]
    return RNA, seq, struct, reactivity
def create_fasta_shapeFiles(RNA, seq, struct, reactivity, path_fasta, path_SHAPE):
    for Id in RNA:
        Outfasta = open(os.path.join(path_fasta, Id + '.fa'), 'w')
        OutShape = open(os.path.join(path_SHAPE, Id + '.shape'), 'w')
        Outfasta.write("%s \n" % (">" + Id))
        Outfasta.write("%s \n" % (seq[Id]))
        Outfasta.write("%s " % (struct[Id]))
        Outfasta.close()
        #print Id, len(reactivity[Id]),len(seq[Id])
        for i, val in enumerate(reactivity[Id][:-1]):
            #print reactivity[Id]
            if i < len(seq[Id]) and val != " ":
                print Id, i, seq[Id][i], "FF", val
                OutShape.write("%i \t %s \t %f \n" % (i + 1, seq[Id][i], float(val)))
        #print "done"
class DotPlot:
"""Class for holding/producing base-pair probability matrices"""
def __init__(self, rna , bpm = None):
self.rna = rna[:]
if bpm is None:
# we will avoid this case to be sure that rnafold from the min works well
self.bpm = self.runRNAFold()
else:
self.bpm = bpm
def getSeq(self):
return self.rna
def getBPProb(self,i,j):
if (i,j) in self.bpm:
return self.bpm[i,j]
else:
return 0.
def getUnpairedProbs(self):
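        # P(i unpaired) = 1 - sum_j P(i pairs with j); each (i, j) key in
        # bpm contributes to both endpoints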
res = [1. for i in self.rna]
for i,j in self.bpm:
res[i] -= self.bpm[i,j]
res[j] -= self.bpm[i,j]
return res
def Parsefile(Path):
fileIn = open(Path, "r")
lines = fileIn.readlines()
fileIn.close()
return lines
def parseReactivityfile(fileinput):
    Reactvities = []
    lines = Parsefile(fileinput)
    for it in range(len(lines)):
        if (lines[it].split("\t")[2][:-1]):
            Reactvities.append(lines[it].split("\t")[2][:-1])
        else:
            Reactvities.append(-10)
    return Reactvities
if __name__ == '__main__':
####################"" To parametrize ##########################
FileExtensionshape ='shape'
react='NMIA'
path_SHAPE='SHAPE_files_NMIA'
path_Fasta = 'fasta_files'
FileExtensionFasta = 'fa'
Shape={}
States={}
BP={}
UNP={}
Quaternary = {}
tertiary = {}
Unpaired2 = {}
lisTIP = []
lisTUn = []
lisHE = []
lisHES = []
lisTIPinterne=[]
lisTIPexterne=[]
SPTIP = []
SPTUn = []
SPHE = []
SPHES = []
SPTIPinterne = []
SPTIPexterne = []
lisIP = [] # shape values for paired Positions
lisUn = [] # shape values for unpaired Positions
SPIP = [] # probability of being unpaired from McCaskill for IP category
SPUn = [] # probability of being unpaired from McCaskill for Un category
RNA=[]
struct=dict()
reactivity=dict()
list3=[]
for filz in GetListFile(path_Fasta, FileExtensionFasta):
#print reactivity
rna = Parsefile(os.path.join(path_Fasta, filz + '.' + FileExtensionFasta))[1]
structure=Parsefile(os.path.join(path_Fasta, filz + '.' + FileExtensionFasta))[2]
States[filz] = Pairing_status(structure)
reactivity[filz]=parseReactivityfile(os.path.join(path_SHAPE, filz + react+'Shape.txt'))
# Get the end-Helix positions
print "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG",filz, len(States[filz])
print States[filz][-1]
for i,elem in enumerate(States[filz]):
if elem=='Un' or elem=='PK':
list3.append(elem)
if elem=='P':
#print i, elem,i-1,i+1, States[filz][i+1]
if i in range(1,len(rna)-3) and (States[filz][i-1]=='Un' or States[filz][i-1]=='PK' or States[filz][i+1]=='Un' or States[filz][i-1]=='PK' ): # wh should add PK constraint because we are looking for stacked substructures that does not take into account thhe tertiary or pseudoknots extension!!
list3.append('HE')
else:
list3.append(elem)
    cum = []
    for filz in GetListFile(path_Fasta, FileExtensionFasta):
        if reactivity[filz] == []:
            print "warning!! Empty reactivity", rna
        cum = cum + reactivity[filz]
    Shape = cum
#print len(Shape)
lIP = [] # shape values for paired Positions
lUn = []
lHE =[]
# for structural_study
#print [elem[:-1] for elem in Shape]
    for nucl, shape in zip(list3, Shape):
        if shape != -10 and nucl == 'P':
            print "la vaaleur", shape
            lIP.append(float(shape))
            #SPIP.append(UnpPb )
        if shape != -10 and nucl == 'Un':
            lUn.append(float(shape))
            #SPUn.append(UnpPb )
        if shape != -10 and nucl == 'HE':
            lHE.append(float(shape))
import numpy as np
labels=["Stacked nucleotides","Unpaired nucleotides","Helix-end"]
lists= [lIP,lUn, lHE]
    for (data, title) in [(lIP, "P" + str(react)), (lUn, "U" + str(react)), (lHE, "HE" + str(react))]:
        print title, '\n', data
|
normal
|
{
"blob_id": "8b671404228642f7ef96844c33ac3cee402bdb19",
"index": 1279,
"step-1": "import os, subprocess, time\nfrom os.path import isfile, join\nimport shutil # to move files from af folder to another\nimport math\n\ndef GetListFile(PathFile, FileExtension):\n return [os.path.splitext(f)[0] for f in os.listdir(PathFile) if isfile(join(PathFile, f)) and os.path.splitext(f)[1] == '.' + FileExtension]\n\n\ndef openfile(Path):\n fileIn = open(Path, \"r\")\n lines = fileIn.readlines()\n fileIn.close()\n return lines\n\n\nimport re\ndef parse_Ct_Structure(Path):\n lines = openfile(Path)\n # Get the initial position of the read and it Series ||||| with mutations\n # replace one space in case of multi spaces re.sub( '\\s+', ' ', mystring ).strip()\n #print Path\n #print [int(re.sub( '\\s+', ' ', elem ).strip().split(' ')[4]) for elem in lines[1:]]\n return [int(re.sub( '\\s+', ' ', elem ).strip().split(' ')[4]) for elem in lines[1:]]\n\ndef parse_SHAPE(Path):\n lines = openfile(Path)\n lista=[]\n # Get the initial position of the read and it Series ||||| with mutations\n # replace one space in case of multi spaces re.sub( '\\s+', ' ', mystring ).strip()\n\n Intermediate=[re.sub( '\\s+', '\\t', elem ).strip().split('\\t') for elem in lines]\n for elem in Intermediate:\n if len(elem)>2 and float(elem[2])!=0:\n lista.append(float(elem[2])) \n else:\n lista.append(-10)\n return lista\ndef Pairing_status(structure):# a line in ct format with a number for the partner if paired and 0 if not\n status=[]\n for value in structure:\n #print value,'lol'\n if value== '(' or value== ')':\n status.append('P')\n\tif value =='.':\n\t status.append('Un') \n if value=='x':\n status.append('PK')\n return status\n\ndef plot2D(x,y,titre):\n import matplotlib.pyplot as plt\n import numpy as np\n\n plt.plot(x,y,'r.')\n plt.xlabel('shape')\n plt.ylabel('unpaired probability')\n plt.title(titre)\n # fitting functions\n plt.show()\ndef distribution_plots(Y,titre):\n # plot the x distribution\n\n import matplotlib.pyplot as plt\n import numpy as np\n '''\n d = {x: Y.count(x) for x in Y}\n print d\n plt.plot(d.keys(),d.values(),'.')\n '''\n plt.hist(Y, bins=10, color='green')\n plt.ylabel('Frequencies')\n plt.title(titre)\n # fitting functions\n #plt.show()\n plt.savefig(titre)\n\ndef GetbasepairsProb(path_Fasta, fil, FileExtensionFasta,Path_dot_plot):\n\n listProbaPairs=[]\n SingleProba=[]\n FastaPath=os.path.join(path_Fasta, fil + '.' + FileExtensionFasta)\n rna =openfile(FastaPath)[1]\n #print rna\n os.system(\"RNAfold -p -d2 --noLP <\" + FastaPath+ \">output.txt\")\n PSPath=os.path.join( Path_dot_plot,fil+\"_dp.ps\")\n shutil.move(fil+\"_dp.ps\",PSPath)\n os.remove(fil+\"_ss.ps\")\n #print fil,'rr'\n bpm = loadDotPlotPS(PSPath)\n dp = DotPlot(rna, bpm)\n\n for i in range(len(rna)):\n for j in range(i, len(rna)):\n if dp.getBPProb(i, j) > 0:# get only non null probabilities\n listProbaPairs.append((i,j,dp.getBPProb(i, j)))\n\n SingleProba=dp.getUnpairedProbs()\n return listProbaPairs, SingleProba\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
loadDotPlotPS(path)\ndef loadDotPlotPS(path):\n res = {}\n outTmp = open(path)\n for l in outTmp:\n data = l[:-1].split()\n if len(data) == 4 and data[3]==\"ubox\":\n i = int(data[0])-1\n j = int(data[1])-1\n p = math.pow(float(data[2]),2.)\n res[i,j] = p\n outTmp.close()\n return res\n\ndef parse_rdat(Path):\n\tlines = openfile(Path)\n\tRNA=[]\n\tseq=dict()\n\tstruct=dict()\n\treactivity=dict()\n\tfor line in lines:\n\t\t#print line.split('\\t')[0]\n\t\tif line.split('\\t')[0]== 'NAME':\n\t\t\tRNA.append(line.split('\\t')[1][:-1])\n\t\t Id= line.split('\\t')[1][:-1]\n\t\t #print 'id',Id\n\t\tif line.split('\\t')[0]=='SEQUENCE':\n\t\t\tseq[Id]= line.split('\\t')[1][:-2]\n\t\tif line.split('\\t')[0]=='STRUCTURE':\n\t\t\tstruct[Id]=line.split('\\t')[1][:-2]\n\t\tif line.split('\\t')[0]=='REACTIVITY:1':\n #print line.split('\\t')[1:-1]\n\t\t\treactivity[Id]=line.split('\\t')[1:-1]\n\treturn RNA,seq, struct,reactivity\n\n\ndef create_fasta_shapeFiles(RNA,seq, struct,reactivity,path_fasta,path_SHAPE):\n for Id in RNA:\n\t Outfasta=open(os.path.join(path_fasta, Id+'.fa'),'w')\n\t OutShape=open(os.path.join(path_SHAPE, Id+'.shape'),'w')\n\t Outfasta.write(\"%s \\n\" % (\">\"+ Id))\n\t Outfasta.write(\"%s \\n\" % (seq[Id]))\n\t Outfasta.write(\"%s \" % (struct[Id]))\n\t Outfasta.close()\n #print Id, len(reactivity[Id]),len(seq[Id]) \n\t for i, val in enumerate(reactivity[Id][:-1]):\n #print reactivity[Id]\n if i <len(seq[Id])and val!=\" \": \n print Id,i, seq[Id][i],\"FF\",val\n\t\t\tOutShape.write(\"%i \\t %s \\t %f \\n\"%(i+1,seq[Id][i],float(val)))\n #print \"done\"\nclass DotPlot:\n \"\"\"Class for holding/producing base-pair probability matrices\"\"\"\n def __init__(self, rna , bpm = None):\n self.rna = rna[:]\n if bpm is None:\n # we will avoid this case to be sure that rnafold from the min works well\n self.bpm = self.runRNAFold()\n else:\n self.bpm = bpm\n def getSeq(self):\n return self.rna\n\n def getBPProb(self,i,j):\n if (i,j) in self.bpm:\n return self.bpm[i,j]\n else:\n return 0.\n\n def getUnpairedProbs(self):\n res = [1. for i in self.rna]\n for i,j in self.bpm:\n res[i] -= self.bpm[i,j]\n res[j] -= self.bpm[i,j]\n return res\n\ndef Parsefile(Path):\n fileIn = open(Path, \"r\")\n lines = fileIn.readlines()\n fileIn.close()\n return lines\n\n\ndef parseReactivityfile(fileinput):\n Reactvities=[]\n lines=Parsefile(fileinput)\n for it in range(len(lines)):\n if (lines[it].split(\"\\t\")[2][:-1]):\n \tReactvities.append(lines[it].split(\"\\t\")[2][:-1])\n\t\telse:\n\t\t\tReactvities.append(-10)\n return Reactvities\n\n\n\nif __name__ == '__main__':\n ####################\"\" To parametrize ##########################\n FileExtensionshape ='shape'\n react='NMIA'\n path_SHAPE='SHAPE_files_NMIA'\n path_Fasta = 'fasta_files'\n FileExtensionFasta = 'fa'\n\n\n Shape={}\n States={}\n\n BP={}\n UNP={}\n Quaternary = {}\n tertiary = {}\n Unpaired2 = {}\n lisTIP = []\n lisTUn = []\n lisHE = []\n lisHES = []\n lisTIPinterne=[]\n lisTIPexterne=[]\n SPTIP = []\n SPTUn = []\n SPHE = []\n SPHES = []\n SPTIPinterne = []\n SPTIPexterne = []\n\n lisIP = [] # shape values for paired Positions\n lisUn = [] # shape values for unpaired Positions\n SPIP = [] # probability of being unpaired from McCaskill for IP category\n SPUn = [] # probability of being unpaired from McCaskill for Un category\n RNA=[]\n\n \n struct=dict()\n reactivity=dict()\n\n \n \n\t\n list3=[]\n \n for filz in GetListFile(path_Fasta, FileExtensionFasta):\n #print reactivity\n \n \n rna = Parsefile(os.path.join(path_Fasta, filz + '.' 
+ FileExtensionFasta))[1]\n structure=Parsefile(os.path.join(path_Fasta, filz + '.' + FileExtensionFasta))[2]\n States[filz] = Pairing_status(structure)\n reactivity[filz]=parseReactivityfile(os.path.join(path_SHAPE, filz + react+'Shape.txt'))\n\n # Get the end-Helix positions\n print \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\",filz, len(States[filz])\n print States[filz][-1]\n for i,elem in enumerate(States[filz]):\n \n if elem=='Un' or elem=='PK':\n list3.append(elem)\n if elem=='P':\n #print i, elem,i-1,i+1, States[filz][i+1]\n if i in range(1,len(rna)-3) and (States[filz][i-1]=='Un' or States[filz][i-1]=='PK' or States[filz][i+1]=='Un' or States[filz][i-1]=='PK' ): # wh should add PK constraint because we are looking for stacked substructures that does not take into account thhe tertiary or pseudoknots extension!!\n list3.append('HE')\n else:\n list3.append(elem)\n cum=[]\n for filz in GetListFile(path_Fasta, FileExtensionFasta):\n if reactivity[filz]==[]:\n\t\tprint \"warning!! Empty reactivity\",rna\n\tcum=cum+reactivity[filz]\n Shape=cum\n #print len(Shape)\n lIP = [] # shape values for paired Positions\n lUn = []\n lHE =[]\n # for structural_study \n #print [elem[:-1] for elem in Shape]\n for nucl,shape in zip(list3,Shape):\n\t\tif shape!=-10 and nucl=='P':\n print \"la vaaleur\", shape\n\t\t lIP.append( float(shape))\n\t\t #SPIP.append(UnpPb )\n\t\tif shape!=-10 and nucl=='Un':\n\t\t lUn.append( float(shape))\n\t\t #SPUn.append(UnpPb )\n\t\tif shape!=-10 and nucl=='HE':\n\t\t lHE.append( float(shape))\n import numpy as np\n labels=[\"Stacked nucleotides\",\"Unpaired nucleotides\",\"Helix-end\"]\n lists= [lIP,lUn, lHE]\n for (data, title) in [(lIP, \"P\" + str(react)), (lUn, \"U\"+ str(react)),(lHE,\"HE\"+ str(react))]:\n \tprint title ,'\\n' , data\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Problem No.: 77
# Solver: Jinmin Goh
# Date: 20191230
# URL: https://leetcode.com/problems/combinations/
import sys
from typing import List
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
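        # Recurrence: fix i as the largest element of the combination and
        # extend every (k-1)-combination of {1..i-1}; k == 0 contributes the
        # single empty combination.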
if k == 0:
return [[]]
ans = []
for i in range(k, n + 1) :
for temp_ans in self.combine(i - 1, k - 1):
ans.append(temp_ans + [i])
return ans
"""
# correct for 26/27 and TLE
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
if k == 0:
return [[]]
if n < k:
return []
nList = [i + 1 for i in range(n)]
if n == k:
return [nList]
if n == k:
return [[i + 1] for i in range(n)]
self.ans = []
if n//2 > k:
self.makeFunc(nList[:], k, [])
else:
self.delFunc(n-k, nList)
return self.ans
def makeFunc(self, nList: list, k: int, temp_ans: list) -> None:
if k == 0:
temp_ans.sort()
if temp_ans not in self.ans:
self.ans.append(temp_ans)
return
else:
return
else:
for i in range(len(nList)):
temp = nList[:]
temp_temp_ans = temp_ans[:]
temp_temp_ans.append(nList[i])
temp.pop(i)
self.makeFunc(temp[:], k-1, temp_temp_ans[:])
def delFunc(self, k: int, temp_ans: list) -> None:
if k == 0:
temp_ans.sort()
if temp_ans not in self.ans:
self.ans.append(temp_ans)
return
else:
return
else:
for i in range(len(temp_ans)):
temp = temp_ans[:]
temp.pop(i)
self.delFunc(k-1, temp[:])
"""
|
normal
|
{
"blob_id": "e4a2c605ef063eee46880515dfff05562916ab81",
"index": 9976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def combine(self, n: int, k: int) ->List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1):\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\n<mask token>\n",
"step-4": "import sys\n\n\nclass Solution:\n\n def combine(self, n: int, k: int) ->List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1):\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\n<mask token>\n",
"step-5": "# Problem No.: 77\n# Solver: Jinmin Goh\n# Date: 20191230\n# URL: https://leetcode.com/problems/combinations/\n\nimport sys\n\nclass Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1) :\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\"\"\"\n# correct for 26/27 and TLE\nclass Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n if k == 0:\n return [[]]\n if n < k:\n return []\n nList = [i + 1 for i in range(n)]\n if n == k:\n return [nList]\n if n == k:\n return [[i + 1] for i in range(n)]\n self.ans = []\n if n//2 > k:\n self.makeFunc(nList[:], k, [])\n else:\n self.delFunc(n-k, nList)\n return self.ans\n \n def makeFunc(self, nList: list, k: int, temp_ans: list) -> None:\n if k == 0:\n temp_ans.sort()\n if temp_ans not in self.ans:\n self.ans.append(temp_ans)\n return\n else:\n return\n else:\n for i in range(len(nList)):\n temp = nList[:]\n temp_temp_ans = temp_ans[:]\n temp_temp_ans.append(nList[i])\n temp.pop(i)\n self.makeFunc(temp[:], k-1, temp_temp_ans[:])\n def delFunc(self, k: int, temp_ans: list) -> None:\n if k == 0:\n temp_ans.sort()\n if temp_ans not in self.ans:\n self.ans.append(temp_ans)\n return\n else:\n return\n else:\n for i in range(len(temp_ans)):\n temp = temp_ans[:]\n temp.pop(i)\n self.delFunc(k-1, temp[:])\n \"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Plot funcs
Jan, 2018 Rose Yu @Caltech
"""
import matplotlib.pyplot as plt
import seaborn as sns
from util.matutil import *
from util.batchutil import *
def plot_img(data, dec_mean, model):
"""
plot ground truth (left) and reconstruction (right)
showing b/w image data of mnist
"""
plt.subplot(121)
plt.imshow(data.data.numpy()[0,].squeeze())
plt.subplot(122)
plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())
plt.show()
plt.pause(1e-6)
plt.gcf().clear()
sample = model.sample_z(data)
plt.imshow(sample)
def plot_kde(data, dec_mean):
"""
plot the kernel density estimation for 2d distributions
"""
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)
sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color="r", shade=True, ax=ax1)
sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color="b", shade=True, ax=ax2)
plt.show()
plt.pause(1e-6)
plt.gcf().clear()
def plot_ts(data, enc_mean, dec_mean):
"""
plot time series with uncertainty
"""
# enc_mean, enc_cov = enc
# dec_mean, dec_cov = dec
batch_size = data.size()[0]
D = 2
N = int(data.size()[1]/D)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)
# plot data
plt.axes(ax1)
ax1.set_ylim(-0.1,0.1)
sns.tsplot(data.view(batch_size,N,-1).data.numpy())
# plot reconstruction
plt.axes(ax2)
ax2.set_ylim(-0.1,0.1)
sns.tsplot(dec_mean.view(batch_size,N,-1).data.numpy())
plt.axes(ax3)
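    # bivech2/bvech (from util.matutil) appear to convert between the
    # half-vectorized (vech) form and full symmetric covariance matrices;
    # their exact semantics are assumed from the names.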
sample_Sigma = bivech2(enc_mean.view(batch_size,N,-1))
sample_vechSigma = bvech(sample_Sigma).data.numpy()
sns.tsplot(sample_vechSigma)
# plot latent variables
# sample_Sigma = ivech2x(enc_cov.data.numpy())
# sample_vechSigma = vechx(sample_Sigma.reshape((-1,N,N)))
# sns.tsplot(sample_vechSigma)
|
normal
|
{
"blob_id": "cca9d91fe20e58f233ccfc4100edb748356ed234",
"index": 6311,
"step-1": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1] / D)\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n plt.axes(ax1)\n ax1.set_ylim(-0.1, 0.1)\n sns.tsplot(data.view(batch_size, N, -1).data.numpy())\n plt.axes(ax2)\n ax2.set_ylim(-0.1, 0.1)\n sns.tsplot(dec_mean.view(batch_size, N, -1).data.numpy())\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size, N, -1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n sns.tsplot(sample_vechSigma)\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom util.matutil import *\nfrom util.batchutil import *\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1] / D)\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n plt.axes(ax1)\n ax1.set_ylim(-0.1, 0.1)\n sns.tsplot(data.view(batch_size, N, -1).data.numpy())\n plt.axes(ax2)\n ax2.set_ylim(-0.1, 0.1)\n sns.tsplot(dec_mean.view(batch_size, N, -1).data.numpy())\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size, N, -1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n sns.tsplot(sample_vechSigma)\n",
"step-5": "\"\"\"\nPlot funcs \nJan, 2018 Rose Yu @Caltech \n\"\"\"\nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom util.matutil import *\nfrom util.batchutil import *\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n # enc_mean, enc_cov = enc\n # dec_mean, dec_cov = dec\n\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1]/D)\n\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n # plot data\n plt.axes(ax1)\n ax1.set_ylim(-0.1,0.1)\n\n sns.tsplot(data.view(batch_size,N,-1).data.numpy())\n\n # plot reconstruction\n plt.axes(ax2)\n ax2.set_ylim(-0.1,0.1)\n sns.tsplot(dec_mean.view(batch_size,N,-1).data.numpy())\n\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size,N,-1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n \n sns.tsplot(sample_vechSigma)\n\n # plot latent variables\n # sample_Sigma = ivech2x(enc_cov.data.numpy())\n # sample_vechSigma = vechx(sample_Sigma.reshape((-1,N,N)))\n # sns.tsplot(sample_vechSigma)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import logging, os, zc.buildout, sys, shutil
class ZipEggs:
def __init__(self, buildout, name, options):
self.name, self.options = name, options
        if options.get('target') is None:
            raise zc.buildout.UserError('Invalid Target')
        if options.get('source') is None:
            raise zc.buildout.UserError('Invalid Source')
def zipit(self):
target = self.options['target']
if not os.path.exists(target):
os.mkdir(target)
path = self.options['source']
for dirs in os.listdir(path):
try:
source = os.path.join(path, dirs)
dist = "%s/%s" % (target, dirs)
print "%s > %s" % (source, dist)
shutil.make_archive(dist, "zip", source)
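                # make_archive always appends ".zip"; rename afterwards to drop the suffix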
os.rename(dist+".zip", dist)
except OSError:
print "ignore %s" % dirs
return []
def install(self):
return self.zipit()
def update(self):
return self.zipit()
|
normal
|
{
"blob_id": "e7bec9018f25ba9e3c3ae8a5bbe11f8bc4b54a04",
"index": 5714,
"step-1": "import logging, os, zc.buildout, sys, shutil\n\nclass ZipEggs:\n def __init__(self, buildout, name, options):\n self.name, self.options = name, options\n if options['target'] is None:\n raise zc.buildout.UserError('Invalid Target')\n if options['source'] is None:\n raise zc.buildout.UserError('Invalid Source')\n\n\n def zipit(self):\n target = self.options['target']\n if not os.path.exists(target):\n os.mkdir(target)\n path = self.options['source']\n for dirs in os.listdir(path):\n try:\n source = os.path.join(path, dirs)\n dist = \"%s/%s\" % (target, dirs)\n print \"%s > %s\" % (source, dist)\n shutil.make_archive(dist, \"zip\", source)\n os.rename(dist+\".zip\", dist)\n except OSError:\n print \"ignore %s\" % dirs\n return []\n\n def install(self):\n return self.zipit()\n\n def update(self):\n return self.zipit()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('grid2', '0003_auto_20161231_2329'),
]
operations = [
migrations.RemoveField(
model_name='grid',
name='gameNumber',
),
migrations.RemoveField(
model_name='grid',
name='gameType',
),
migrations.AddField(
model_name='grid',
name='active',
field=models.BooleanField(default=True),
),
]
|
normal
|
{
"blob_id": "3e305cee2f814698729c008320e326c4bd42640d",
"index": 6629,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grid2', '0003_auto_20161231_2329')]\n operations = [migrations.RemoveField(model_name='grid', name=\n 'gameNumber'), migrations.RemoveField(model_name='grid', name=\n 'gameType'), migrations.AddField(model_name='grid', name='active',\n field=models.BooleanField(default=True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grid2', '0003_auto_20161231_2329')]\n operations = [migrations.RemoveField(model_name='grid', name=\n 'gameNumber'), migrations.RemoveField(model_name='grid', name=\n 'gameType'), migrations.AddField(model_name='grid', name='active',\n field=models.BooleanField(default=True))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('grid2', '0003_auto_20161231_2329'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='grid',\n name='gameNumber',\n ),\n migrations.RemoveField(\n model_name='grid',\n name='gameType',\n ),\n migrations.AddField(\n model_name='grid',\n name='active',\n field=models.BooleanField(default=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print("Hi, Ich bin " + self.name)
def say_hi_to_everybody(self):
print("Hi to all objects :-)")
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print("Hi, Ich bin " + self.name)
name_1 = "Marvin"
name_2 = "James"
x = Robot(name_1)
y = PhysicianRobot(name_2)
print(x, type(x))
x.say_hi()
x.say_hi_to_everybody()
print(y, type(y))
y.say_hi()
y.say_hi_again()
y.say_hi_to_everybody()
|
normal
|
{
"blob_id": "6b24c438ca7bb4c37ae356c18c562831767f0569",
"index": 9961,
"step-1": "<mask token>\n\n\nclass PhysicianRobot(Robot):\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\n",
"step-3": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-4": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\nname_1 = 'Marvin'\nname_2 = 'James'\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-5": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print(\"Hi, Ich bin \" + self.name)\n\n def say_hi_to_everybody(self):\n print(\"Hi to all objects :-)\")\n\n\nclass PhysicianRobot(Robot):\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print(\"Hi, Ich bin \" + self.name)\n\n\nname_1 = \"Marvin\"\nname_2 = \"James\"\n\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\n\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\n\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-ids": [
1,
6,
7,
8,
9
]
}
|
[
1,
6,
7,
8,
9
] |
import rdflib
import csv
from time import sleep
gtypes = {}
dtypes = {}
atypes = {}
g = rdflib.Graph()
g.parse("http://geographicknowledge.de/vocab/CoreConceptData.rdf#")
g.parse("./ontology.ttl", format="ttl")
sleep(.5)
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix dcat: <https://www.w3.org/TR/vocab-dcat#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?type
where {
?dataset a dcat:Dataset , ?type .
filter (
?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )
)
}
""")
for result in results:
uri, geometry_type = result
gtypes[str(uri)] = str(geometry_type).split('#')[1]
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix dcat: <https://www.w3.org/TR/vocab-dcat#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?type
where {
?dataset a dcat:Dataset , ?type .
?type rdfs:subClassOf+ ccd:CoreConceptDataSet .
}
""")
for result in results:
uri, dtype = result
dtypes[str(uri)] = str(dtype).split('#')[1]
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?label ?type
where {
?attribute ada:ofDataSet ?dataset ;
skos:exactMatch ?concept ;
rdfs:label ?label .
optional {
?concept a ?type .
?type rdfs:subClassOf+ ccd:Attribute .
}
}
group by ?dataset ?label ?type
""")
for result in results:
dataset, label, atype = result
key = (str(dataset), str(label))
if atype is None and key not in atypes:
atypes[key] = ""
elif atype is not None:
atypes[key] = str(atype).split('#')[1]
test_gtypes = {}
test_dtypes = {}
test_atypes = {}
with open("./datasets/annotations_datasets.csv", 'r') as fin:
reader = csv.reader(fin)
next(reader)
for row in reader:
test_gtypes[row[0]] = row[1]
test_dtypes[row[0]] = row[2]
with open("./datasets/annotations_attributes.csv", 'r') as fin:
reader = csv.reader(fin)
next(reader)
for row in reader:
test_atypes[(row[0],row[1])] = row[2]
tp = 0
total = 0
fn = len(test_gtypes)
for k, v in gtypes.items():
if k not in test_gtypes: # skip some extra test datasets
continue
total += 1
if test_gtypes[k] == v:
tp += 1
fn -= 1
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Geometry type scores:")
print(f"P: {p} , R: {r} , F: {f}")
tp = 0
total = 0
fn = len(test_dtypes)
for k, v in dtypes.items():
if k not in test_dtypes:
continue
total += 1
if test_dtypes[k] == v:
tp += 1
fn -= 1
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Dataset type scores:")
print(f"P: {p} , R: {r} , F: {f}")
filter_nontypes = True
if filter_nontypes:
test_atypes = {k: v for k, v in test_atypes.items() if v != ""}
atypes = {k: v for k, v in atypes.items() if v != ""}
tp = 0
total = 0
fn = len(list(filter(lambda x: x != "", test_atypes.values())))
for k, v in atypes.items():
if k not in test_atypes:
continue
if v != "":
total += 1
if test_atypes[k] == v:
tp += 1
fn -= 1
elif v == "BooleanA" and test_atypes[k] == "NominalA": # boolean is "more" correct
tp += 1
fn -= 1
else:
print(k, v, test_atypes[k])
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Attribute type scores:")
print(f"P: {p} , R: {r} , F: {f}")
|
normal
|
{
"blob_id": "eb1fbe2de3c8548175eb3c8720353e466e3b68c7",
"index": 7336,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\n<mask token>\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\n<mask token>\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\n<mask token>\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\n<mask token>\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\n<mask token>\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\n<mask token>\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n<mask token>\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\n<mask token>\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n<mask token>\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\n<mask token>\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\n<mask token>\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n",
"step-3": "<mask token>\ngtypes = {}\ndtypes = {}\natypes = {}\ng = rdflib.Graph()\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\"\n )\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\"\n )\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\"\n )\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != '', test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\np = tp / total\nr 
= tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n",
"step-4": "import rdflib\nimport csv\nfrom time import sleep\ngtypes = {}\ndtypes = {}\natypes = {}\ng = rdflib.Graph()\ng.parse('http://geographicknowledge.de/vocab/CoreConceptData.rdf#')\ng.parse('./ontology.ttl', format='ttl')\nsleep(0.5)\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\"\n )\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\"\n )\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\nresults = g.query(\n \"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\"\n )\nfor result in results:\n dataset, label, atype = result\n key = str(dataset), str(label)\n if atype is None and key not in atypes:\n atypes[key] = ''\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\nwith open('./datasets/annotations_datasets.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\nwith open('./datasets/annotations_attributes.csv', 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[row[0], row[1]] = row[2]\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes:\n continue\n total += 1\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Geometry type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Dataset type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != ''}\n atypes = {k: v for k, v in atypes.items() if v != ''}\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != '', test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n if v != '':\n total += 1\n if test_atypes[k] == v:\n tp += 1\n fn -= 1\n elif v == 'BooleanA' and test_atypes[k] == 'NominalA':\n tp += 1\n fn -= 1\n else:\n print(k, 
v, test_atypes[k])\np = tp / total\nr = tp / (tp + fn)\nf = 2 * (p * r / (p + r))\nprint('Attribute type scores:')\nprint(f'P: {p} , R: {r} , F: {f}')\n",
"step-5": "import rdflib\nimport csv\n\nfrom time import sleep\n\ngtypes = {}\ndtypes = {}\natypes = {}\n\ng = rdflib.Graph()\ng.parse(\"http://geographicknowledge.de/vocab/CoreConceptData.rdf#\")\ng.parse(\"./ontology.ttl\", format=\"ttl\")\n\nsleep(.5)\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n\n filter (\n ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet )\n )\n }\n\"\"\")\n\nfor result in results:\n uri, geometry_type = result\n gtypes[str(uri)] = str(geometry_type).split('#')[1]\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix dcat: <https://www.w3.org/TR/vocab-dcat#>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?type\n where {\n ?dataset a dcat:Dataset , ?type .\n ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .\n }\n\"\"\")\n\nfor result in results:\n uri, dtype = result\n dtypes[str(uri)] = str(dtype).split('#')[1]\n\n\nresults = g.query(\"\"\"\n prefix skos: <http://www.w3.org/2004/02/skos/core#>\n prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>\n prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>\n prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n select ?dataset ?label ?type\n where {\n ?attribute ada:ofDataSet ?dataset ;\n skos:exactMatch ?concept ;\n rdfs:label ?label .\n\n optional {\n ?concept a ?type .\n ?type rdfs:subClassOf+ ccd:Attribute .\n }\n }\n group by ?dataset ?label ?type\n\"\"\")\n\nfor result in results:\n dataset, label, atype = result\n key = (str(dataset), str(label))\n if atype is None and key not in atypes:\n atypes[key] = \"\"\n elif atype is not None:\n atypes[key] = str(atype).split('#')[1]\n\n\ntest_gtypes = {}\ntest_dtypes = {}\ntest_atypes = {}\n\nwith open(\"./datasets/annotations_datasets.csv\", 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_gtypes[row[0]] = row[1]\n test_dtypes[row[0]] = row[2]\n\nwith open(\"./datasets/annotations_attributes.csv\", 'r') as fin:\n reader = csv.reader(fin)\n next(reader)\n for row in reader:\n test_atypes[(row[0],row[1])] = row[2]\n\ntp = 0\ntotal = 0\nfn = len(test_gtypes)\nfor k, v in gtypes.items():\n if k not in test_gtypes: # skip some extra test datasets\n continue\n total += 1\n\n if test_gtypes[k] == v:\n tp += 1\n fn -= 1\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Geometry type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\ntp = 0\ntotal = 0\nfn = len(test_dtypes)\nfor k, v in dtypes.items():\n if k not in test_dtypes:\n continue\n total += 1\n\n if test_dtypes[k] == v:\n tp += 1\n fn -= 1\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Dataset type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\nfilter_nontypes = True\nif filter_nontypes:\n test_atypes = {k: v for k, v in test_atypes.items() if v != \"\"}\n atypes = {k: v for k, v in atypes.items() if v != \"\"}\n\ntp = 0\ntotal = 0\nfn = len(list(filter(lambda x: x != \"\", test_atypes.values())))\nfor k, v in atypes.items():\n if k not in test_atypes:\n continue\n\n if v != \"\":\n total += 1\n\n if test_atypes[k] == v:\n tp += 
1\n fn -= 1\n elif v == \"BooleanA\" and test_atypes[k] == \"NominalA\": # boolean is \"more\" correct\n tp += 1\n fn -= 1\n else:\n print(k, v, test_atypes[k])\n\np = tp / total\nr = tp / (tp + fn)\nf = 2 * ((p * r) / (p + r))\n\nprint(\"Attribute type scores:\")\nprint(f\"P: {p} , R: {r} , F: {f}\")\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from urllib import request, parse
import pandas as pd
import json
import os
class BusInfo:
url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'
url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'
params = {
'odpt:operator': 'odpt.Operator:YokohamaMunicipal',
}
bus_stops = None
bus_routes = None
@staticmethod
def init():
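        # Read the API key from the environment and preload the static stop and route tables.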
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
#BusInfo.bus_routes = pd.DataFrame()
#return
busroute_list=[]
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = { 'route_id': v['owl:sameAs'],
'route_name': v['dc:title'],
}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
#BusInfo.bus_stops = pd.DataFrame()
#return
busstop_list=[]
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = { 'busstop_id': v['owl:sameAs'],
'pole_name': v['dc:title'],
}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list=[]
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
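                    # Map the ODPT occupancy status onto a display label and a marker color.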
if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color='blue'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color='blue'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':
                        occupancy = '座席わずか'
color='yellow'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color='red'
                    else:
                        occupancy = '-'  # fallback label; previously left unset, so a stale value from an earlier bus could leak in
                        color='gray'
bus = { 'bus_id': v['odpt:busNumber'],
'lat': v['geo:lat'],
'lng': v['geo:long'],
'route_num': v['odpt:busroute'][-3:],
'route_id': v['odpt:busroutePattern'],
'prevStop': v['odpt:fromBusstopPole'],
'nextStop': v['odpt:toBusstopPole'],
'occupancy' : occupancy,
'color' : color,
'azimuth' : v['odpt:azimuth'],
'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'
}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
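        # Join human-readable stop and route names onto the live position records.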
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')
return df.fillna("-")
if __name__ == '__main__':
BusInfo.init()
print('=== Get stop info ===')
BusInfo.getBusStops()
print(BusInfo.bus_stops)
print('=== Get route info ===')
BusInfo.getBusRoutes()
#print(BusInfo.bus_routes)
print(len(BusInfo.bus_routes))
print('=== Get bus info ===')
bus_list = BusInfo.update()
print(bus_list)
print(bus_list.columns)
|
normal
|
{
"blob_id": "7eefcfdb9682cb09ce2d85d11aafc04977016ba4",
"index": 8332,
"step-1": "<mask token>\n\n\nclass BusInfo:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-4": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-5": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\nclass BusInfo:\n\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'\n url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'\n \n params = {\n 'odpt:operator': 'odpt.Operator:YokohamaMunicipal',\n }\n\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n \n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n \n #BusInfo.bus_routes = pd.DataFrame()\n #return\n\n busroute_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'route_id': v['owl:sameAs'],\n 'route_name': v['dc:title'],\n }\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n\n #BusInfo.bus_stops = pd.DataFrame()\n #return\n\n busstop_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'busstop_id': v['owl:sameAs'],\n 'pole_name': v['dc:title'],\n }\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n\n if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color='blue'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color='blue'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color='yellow'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color='red'\n else:\n color='gray'\n\n bus = { 'bus_id': v['odpt:busNumber'],\n 'lat': v['geo:lat'],\n 'lng': v['geo:long'],\n 'route_num': v['odpt:busroute'][-3:],\n 'route_id': v['odpt:busroutePattern'],\n 'prevStop': v['odpt:fromBusstopPole'],\n 'nextStop': v['odpt:toBusstopPole'],\n 'occupancy' : occupancy,\n 'color' : color,\n 'azimuth' : v['odpt:azimuth'],\n 'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'\n }\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')\n return df.fillna(\"-\")\n\nif __name__ == '__main__':\n\n BusInfo.init()\n\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n #print(BusInfo.bus_routes)\n print(len(BusInfo.bus_routes))\n\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from rest_framework import serializers
from users.models import bills, Userinfo
class billsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = bills
fields = ('bname', 'bamount', 'duedate', 'user_id')
class UserinfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Userinfo
fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode', 'dob', 'phone', 'email', 'author')
|
normal
|
{
"blob_id": "124ece8f2f4ecc53d19657e2463cc608befb1ce7",
"index": 3722,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-3": "<mask token>\n\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = bills\n fields = 'bname', 'bamount', 'duedate', 'user_id'\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-4": "from rest_framework import serializers\nfrom users.models import bills, Userinfo\n\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = bills\n fields = 'bname', 'bamount', 'duedate', 'user_id'\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-5": "from rest_framework import serializers\nfrom users.models import bills, Userinfo\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = bills\n fields = ('bname', 'bamount', 'duedate', 'user_id')\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode', 'dob', 'phone', 'email', 'author')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# from datetime import datetime
from datetime import datetime, time, timedelta
# today = datetime.now()
# previous_day = today - timedelta(days=1)
# previous_day = previous_day.strftime("%Y%m%d")
# print(today)
# print(previous_day)
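# Parse a date string and normalize the time-of-day component to midnight.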
print(datetime.strptime("2013-1-25", '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))
# def is_midnight():
# current_time = datetime.now().time
# match_date = datetime.now()
# # current_time = datetime.now()
# if current_time >= time(0,0) and current_time <= time(4, 30):
# print(match_date)
# return match_date.strftime("%Y%m%d")
# else:
# match_date = current_time - timedelta(days=1)
# return match_date
# print(is_midnight())
# def is_midnight():
# current_time = datetime.now().time()
# # today = datetime.now()
# previous_day = current_time - timedelta(days=1)
# previous_day = previous_day.strftime("%Y%m%d")
# if current_time >= time(0,0) and current_time <= time(4, 30):
# print('yesy')
# if time(0,0) < time(4, 30):
# return current_time >= time(0,0) and current_time <= time(4, 30)
# else: # crosses midnight
# return current_time >= time(0,0) or current_time <= time(4, 30)
# print(is_midnight())
# match_date = datetime.now()
# current_time = datetime.now().time()
# if current_time >= time(0,0) and current_time <= time(4, 30):
# previous_day = match_date - timedelta(days=1)
# match_date = previous_day.strftime("%Y%m%d")
# print(match_date)
# match_date = datetime.now().strftime("%Y-%m-%d")
# # datetime.timedelta(days=1)
# hour = datetime.now().strftime("%H")
# if int(hour) < 4:
# print('Previous day: %s' % (datetime.timedelta(days=1)))
# else:
# print(match_date)
# print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# import cx_Oracle
# connection = None
# try:
# connection = cx_Oracle.connect(
# 'JW',
# '901203',
# 'HOME-PC/XE',
# encoding='UTF-8')
# # show the version of the Oracle Database
# print(connection.version)
# c = connection.cursor()
# c.execute('SELECT MATCH_ID,MATCH_DATETIME,LEAGUE,HOME_TEAM,AWAY_TEAM,HOME_FT_GOAL,AWAY_FT_GOAL,CASE WHEN HOME_FT_GOAL > AWAY_FT_GOAL THEN \'H\' WHEN HOME_FT_GOAL = AWAY_FT_GOAL THEN \'D\' ELSE \'A\' END HDA_RESULT,DRAW_MEAN,DRAW_MEDIAN,O_MACAU_D,O_BET365_D,O_HKJC_D,AWAY_MEAN,AWAY_MEDIAN,O_MACAU_A,O_BET365_A,O_HKJC_A,HKJC_ASIAN_HANDICAP,HKJC_ASIAN_AWAY FROM HDA_MEAN_VIEW WHERE MATCH_HANDICAP=\'上盤\' AND HOME_IND=0 AND DRAW_IND=1 AND AWAY_IND=1 ORDER BY MATCH_DATETIME DESC')
# for row in c:
# print(row[0],',',row[1])
# except cx_Oracle.Error as error:
# print(error)
# finally:
# # release the connection
# if connection:
# connection.close()
|
normal
|
{
"blob_id": "4dac8e7e695c473cb73ceaf3887373bcc0a08aff",
"index": 5940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(datetime.strptime('2013-1-25', '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n",
"step-3": "from datetime import datetime, time, timedelta\nprint(datetime.strptime('2013-1-25', '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n",
"step-4": "# from datetime import datetime\nfrom datetime import datetime, time, timedelta\n# today = datetime.now()\n# previous_day = today - timedelta(days=1)\n# previous_day = previous_day.strftime(\"%Y%m%d\")\n# print(today)\n# print(previous_day)\n\nprint(datetime.strptime(\"2013-1-25\", '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n\n# def is_midnight():\n# current_time = datetime.now().time\n# match_date = datetime.now()\n# # current_time = datetime.now()\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# print(match_date)\n# return match_date.strftime(\"%Y%m%d\")\n# else:\n# match_date = current_time - timedelta(days=1)\n# return match_date\n \n# print(is_midnight())\n\n# def is_midnight():\n# current_time = datetime.now().time()\n# # today = datetime.now()\n# previous_day = current_time - timedelta(days=1)\n# previous_day = previous_day.strftime(\"%Y%m%d\")\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# print('yesy')\n # if time(0,0) < time(4, 30):\n # return current_time >= time(0,0) and current_time <= time(4, 30)\n # else: # crosses midnight\n # return current_time >= time(0,0) or current_time <= time(4, 30)\n\n# print(is_midnight())\n\n\n# match_date = datetime.now()\n# current_time = datetime.now().time()\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# previous_day = match_date - timedelta(days=1)\n# match_date = previous_day.strftime(\"%Y%m%d\")\n \n# print(match_date)\n\n# match_date = datetime.now().strftime(\"%Y-%m-%d\")\n# # datetime.timedelta(days=1)\n# hour = datetime.now().strftime(\"%H\")\n# if int(hour) < 4:\n# print('Previous day: %s' % (datetime.timedelta(days=1)))\n# else:\n# print(match_date)\n \n# print(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n# import cx_Oracle\n\n# connection = None\n# try:\n# connection = cx_Oracle.connect(\n# 'JW',\n# '901203',\n# 'HOME-PC/XE',\n# encoding='UTF-8')\n\n# # show the version of the Oracle Database\n# print(connection.version)\n \n# c = connection.cursor()\n# c.execute('SELECT MATCH_ID,MATCH_DATETIME,LEAGUE,HOME_TEAM,AWAY_TEAM,HOME_FT_GOAL,AWAY_FT_GOAL,CASE WHEN HOME_FT_GOAL > AWAY_FT_GOAL THEN \\'H\\' WHEN HOME_FT_GOAL = AWAY_FT_GOAL THEN \\'D\\' ELSE \\'A\\' END HDA_RESULT,DRAW_MEAN,DRAW_MEDIAN,O_MACAU_D,O_BET365_D,O_HKJC_D,AWAY_MEAN,AWAY_MEDIAN,O_MACAU_A,O_BET365_A,O_HKJC_A,HKJC_ASIAN_HANDICAP,HKJC_ASIAN_AWAY FROM HDA_MEAN_VIEW WHERE MATCH_HANDICAP=\\'上盤\\' AND HOME_IND=0 AND DRAW_IND=1 AND AWAY_IND=1 ORDER BY MATCH_DATETIME DESC')\n# for row in c:\n# print(row[0],',',row[1])\n# except cx_Oracle.Error as error:\n# print(error)\n# finally:\n# # release the connection\n# if connection:\n# connection.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import pandas as pd
import time
def job_spider(jid="1913e38066dd3c8e1Hd40t--FVE~", ka="search_list_1", i=0):
# request info.
job_url = "https://www.zhipin.com/job_detail/" + jid + ".html"
headers = {
'cache-control': "no-cache",
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/71.0.3578.80 Safari/537.36'
}
querystring = {"ka": ka}
try:
# request
r = requests.request("GET", job_url, headers=headers, params=querystring)
content = r.content.decode('utf-8')
# raw data.
file = "./raw_data/page/" + jid + ".html"
with open(file, 'w', encoding='utf-8') as f:
f.write(content)
result = "suceed"
except IOError:
result = "failed"
log = "job " + str(i) + " : " + jid + " crawl " + result
print(log)
if __name__ == "__main__":
file = "./raw_data/list/job_list.csv"
df = pd.read_csv(file, encoding='utf-8', header=None)
jid_list = df[0].values.tolist()
ka_list = df[1].values.tolist()
# print(jid_list)
for i in range(0, len(jid_list)):
job_spider(jid_list[i], ka_list[i], i)
time.sleep(1)
|
normal
|
{
"blob_id": "5b894eac93bff44931df4ef8d845c23071a03227",
"index": 3461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-4": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-5": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid=\"1913e38066dd3c8e1Hd40t--FVE~\", ka=\"search_list_1\", i=0):\n # request info.\n job_url = \"https://www.zhipin.com/job_detail/\" + jid + \".html\"\n\n headers = {\n 'cache-control': \"no-cache\",\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.80 Safari/537.36'\n }\n\n querystring = {\"ka\": ka}\n\n try:\n # request\n r = requests.request(\"GET\", job_url, headers=headers, params=querystring)\n content = r.content.decode('utf-8')\n\n # raw data.\n file = \"./raw_data/page/\" + jid + \".html\"\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = \"suceed\"\n except IOError:\n result = \"failed\"\n\n log = \"job \" + str(i) + \" : \" + jid + \" crawl \" + result\n print(log)\n\n\nif __name__ == \"__main__\":\n\n file = \"./raw_data/list/job_list.csv\"\n df = pd.read_csv(file, encoding='utf-8', header=None)\n\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n # print(jid_list)\n\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from search import SearchEngine
import tkinter as tk
if __name__ == "__main__":
ghettoGoogle = SearchEngine()
def searchButtonEvent():
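        # Run the query through the engine and display the results in a separate window.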
search_query = searchQueryWidget.get()
search_results = ghettoGoogle.search(search_query)
        resultsCanvas = tk.Toplevel()  # secondary window; creating a second tk.Tk() root alongside the main one is error-prone
        if search_results is None: 
tk.Label(resultsCanvas,text="No results",justify=tk.LEFT).pack(fill='both')
else:
searchTextBox = tk.Text(resultsCanvas,height=20,width=100)
searchTextBox.pack(side=tk.LEFT,fill=tk.Y)
scrollBar = tk.Scrollbar(resultsCanvas)
scrollBar.pack(side=tk.RIGHT,fill=tk.Y)
scrollBar.config(command=searchTextBox.yview)
searchTextBox.config(yscrollcommand=scrollBar.set)
searchTextBox.tag_config('Link',foreground='blue')
for i in range(len(search_results)):
searchTextBox.insert(tk.END,search_results[i][0]+"\n",'Link')
searchTextBox.insert(tk.END,search_results[i][1]+"\n\n")
canvas = tk.Tk()
tk.Label(canvas, text = "Enter search query").grid(row = 0)
searchQueryWidget = tk.Entry(canvas)
searchQueryWidget.grid(row=0,column=1)
tk.Button(canvas,text="Quit",command=canvas.quit).grid(row=1,column=0,sticky=tk.W)
tk.Button(canvas,text="Search",command=searchButtonEvent).grid(row=1,column=0,sticky=tk.W)
canvas.mainloop()
|
normal
|
{
"blob_id": "0cf90cd7704db9f7467e458b402fadb01c701148",
"index": 7307,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent():\n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None:\n tk.Label(resultsCanvas, text='No results', justify=tk.LEFT).pack(\n fill='both')\n else:\n searchTextBox = tk.Text(resultsCanvas, height=20, width=100)\n searchTextBox.pack(side=tk.LEFT, fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT, fill=tk.Y)\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n searchTextBox.tag_config('Link', foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END, search_results[i][0] + '\\n',\n 'Link')\n searchTextBox.insert(tk.END, search_results[i][1] + '\\n\\n')\n canvas = tk.Tk()\n tk.Label(canvas, text='Enter search query').grid(row=0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0, column=1)\n tk.Button(canvas, text='Quit', command=canvas.quit).grid(row=1, column=\n 0, sticky=tk.W)\n tk.Button(canvas, text='Search', command=searchButtonEvent).grid(row=1,\n column=0, sticky=tk.W)\n canvas.mainloop()\n",
"step-3": "from search import SearchEngine\nimport tkinter as tk\nif __name__ == '__main__':\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent():\n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None:\n tk.Label(resultsCanvas, text='No results', justify=tk.LEFT).pack(\n fill='both')\n else:\n searchTextBox = tk.Text(resultsCanvas, height=20, width=100)\n searchTextBox.pack(side=tk.LEFT, fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT, fill=tk.Y)\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n searchTextBox.tag_config('Link', foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END, search_results[i][0] + '\\n',\n 'Link')\n searchTextBox.insert(tk.END, search_results[i][1] + '\\n\\n')\n canvas = tk.Tk()\n tk.Label(canvas, text='Enter search query').grid(row=0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0, column=1)\n tk.Button(canvas, text='Quit', command=canvas.quit).grid(row=1, column=\n 0, sticky=tk.W)\n tk.Button(canvas, text='Search', command=searchButtonEvent).grid(row=1,\n column=0, sticky=tk.W)\n canvas.mainloop()\n",
"step-4": "from search import SearchEngine\nimport tkinter as tk \n\nif __name__ == \"__main__\":\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent(): \n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None: \n tk.Label(resultsCanvas,text=\"No results\",justify=tk.LEFT).pack(fill='both') \n else: \n searchTextBox = tk.Text(resultsCanvas,height=20,width=100)\n searchTextBox.pack(side=tk.LEFT,fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT,fill=tk.Y)\n\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n\n searchTextBox.tag_config('Link',foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END,search_results[i][0]+\"\\n\",'Link')\n searchTextBox.insert(tk.END,search_results[i][1]+\"\\n\\n\")\n\n canvas = tk.Tk()\n tk.Label(canvas, text = \"Enter search query\").grid(row = 0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0,column=1)\n \n tk.Button(canvas,text=\"Quit\",command=canvas.quit).grid(row=1,column=0,sticky=tk.W)\n tk.Button(canvas,text=\"Search\",command=searchButtonEvent).grid(row=1,column=0,sticky=tk.W)\n canvas.mainloop() \n\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
species(
label = 'C=C([CH]C)C(=C)[CH]C(24182)',
structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),
E0 = (249.687,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C1([CH]C)CC1=CC(25275)',
structure = SMILES('[CH2]C1([CH]C)CC1=CC'),
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=CC)C(C)=[C]C(25412)',
structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(C)=CC(25413)',
structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH2]C(=CC)[C](C)C=C(24605)',
structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),
E0 = (216.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2][C](C=C)C(C)=CC(24606)',
structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),
E0 = (216.244,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2]C(=CC)[C]1CC1C(25414)',
structure = SMILES('[CH2]C(=CC)[C]1CC1C'),
E0 = (289.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)"""),
)
species(
label = '[CH2][C]1C(=CC)CC1C(25415)',
structure = SMILES('[CH2]C1=C([CH]C)CC1C'),
E0 = (304.572,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(=C)C([CH2])=CC(25416)',
structure = SMILES('[CH2]C(=C)C([CH2])=CC'),
E0 = (285.713,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),
HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
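# Illustrative sketch: a HinderedRotor(...) specified by (inertia, symmetry,
# barrier) corresponds to the standard cosine torsional potential
#   V(phi) = (V0/2) * (1 - cos(sigma*phi)),
# where V0 is the listed barrier and sigma the symmetry number.
import math
def hindered_rotor_potential(phi, barrier, symmetry):
    """Torsional potential in kJ/mol at dihedral angle phi (radians)."""
    return 0.5 * barrier * (1.0 - math.cos(symmetry * phi))
# e.g. the first rotor above tops out at V(pi) = 22.5291 kJ/mol (symmetry=1).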
species(
label = 'C=C([CH]C)C[C]=CC(24184)',
structure = SMILES('[CH2]C(=CC)C[C]=CC'),
E0 = (366.985,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),
HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
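# Illustrative sketch: the TransportData comment above says epsilon and sigma
# were estimated from Joback critical constants. The printed numbers are
# consistent with the common critical-point correlations (an inference from
# back-calculation, not stated in this file):
#   epsilon = 0.77 * R * Tc  [J/mol],  sigma = 2.44 * (Tc/Pc)**(1/3)  [angstroms],
# with Tc in K and Pc in bar.
def lj_from_critical(Tc, Pc):
    """Return (epsilon in J/mol, sigma in angstroms) from Tc [K], Pc [bar]."""
    R = 8.314462618
    return 0.77 * R * Tc, 2.44 * (Tc / Pc)**(1.0/3.0)
# lj_from_critical(575.38, 31.18) -> (~3683.7, ~6.448), matching the values above.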
species(
label = 'CC=C1CCC1=CC(25269)',
structure = SMILES('CC=C1CCC1=CC'),
E0 = (114.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C([C]=CC)=CC(25417)',
structure = SMILES('[CH2]C([C]=CC)=CC'),
E0 = (334.774,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C1([CH]C)C(=C)C1C(25296)',
structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C(=CC)C(=C)C=C(24604)',
structure = SMILES('[CH2]C(=CC)C(=C)C=C'),
E0 = (242.677,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),
HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (107.173,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2]CC(=C)C([CH2])=CC(25418)',
structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),
E0 = (316.814,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),
HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C(CC)C([CH2])=CC(25419)',
structure = SMILES('[CH]=C(CC)C([CH2])=CC'),
E0 = (358.664,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(=C)CC(25420)',
structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),
E0 = (349.41,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),
HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH]=C([CH]C)C(C)=CC(25421)',
structure = SMILES('[CH]C(=CC)C(C)=CC'),
E0 = (317.373,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2][C](C=C)C(=C)CC(24623)',
structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),
E0 = (228.159,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)"""),
)
species(
label = 'C[CH][C]1CCC1=CC(25422)',
structure = SMILES('C[CH]C1CCC=1[CH]C'),
E0 = (303.292,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)"""),
)
species(
label = '[CH2][C]1C(=C)C(C)C1C(25423)',
structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),
E0 = (305.852,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=CC(=C)C(C)=CC(24616)',
structure = SMILES('C=CC(=C)C(C)=CC'),
E0 = (91.1774,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=[C]C(C)C(=C)[CH]C(24183)',
structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),
E0 = (369.44,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),
HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = 'C=C1C(=CC)CC1C(25265)',
structure = SMILES('C=C1C(=CC)CC1C'),
E0 = (118.381,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CHCH3(T)(95)',
structure = SMILES('[CH]C'),
E0 = (343.893,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C([C]=C)=CC(24774)',
structure = SMILES('[CH2]C([C]=C)=CC'),
E0 = (370.8,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C([CH]C)C(=C)CC(25424)',
structure = SMILES('[CH]C(=CC)C(=C)CC'),
E0 = (330.753,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=CC(=C)C(=C)CC(24630)',
structure = SMILES('C=CC(=C)C(=C)CC'),
E0 = (104.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=C1C(=C)C(C)C1C(25274)',
structure = SMILES('C=C1C(=C)C(C)C1C'),
E0 = (122.654,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (291.23,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
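# Illustrative note: the transitionState(...) entries in this block carry only
# E0 plus degeneracy data; the high-pressure-limit rate coefficients are
# supplied by the Arrhenius fits attached to each reaction(...) below, while
# these E0 values place the barriers on the master-equation energy grid.
# For example,
#   E0(TS1) - E0(isomer) = 291.23 - 249.687 = 41.543 kJ/mol,
# which matches the Ea of reaction1 below ("raised ... to match endothermicity").
def barrier_height(E0_ts, E0_reactant):
    """Forward barrier in kJ/mol from zero-point-corrected energies E0."""
    return E0_ts - E0_reactant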
transitionState(
label = 'TS2',
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (538.699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (497.951,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (380.338,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (399.474,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (722.113,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (343.259,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (705.575,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (537.022,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (716.337,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (454.469,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (430.619,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (503.849,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (393.718,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (361.682,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (375.044,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (274.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (463.915,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (714.692,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (375.062,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (258.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission
Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""),
)
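# Illustrative sketch: each kinetics = Arrhenius(...) entry is the modified
# Arrhenius form k(T) = A * (T/T0)**n * exp(-Ea/(R*T)).
import math
def arrhenius_k(T, A, n, Ea, T0=1.0):
    """k(T) in the units of A; Ea in kJ/mol, T in K."""
    R = 8.314462618e-3  # kJ/(mol*K)
    return A * (T / T0)**n * math.exp(-Ea / (R * T))
# e.g. reaction1 at 1000 K: arrhenius_k(1000.0, 5e12, 0, 41.5431) ~ 3.4e10 s^-1.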
reaction(
label = 'reaction2',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)CC1=CC(25275)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C](C)C=C(24605)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(C)=CC(24606)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction9',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C]1CC1C(25414)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction10',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=CC)CC1C(25415)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
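# Worked note: the printed A factors already fold in the stated reaction path
# degeneracies. Compare the A above (6.43734e+08 s^-1, "Multiplied by reaction
# path degeneracy 2.0") with reaction22 below, which averages the same
# [R4_S_D;doublebond_intra;radadd_intra_cs2H] rule without that multiplier:
assert abs(6.43734e+08 - 2.0 * 3.21867e+08) < 1.0  # per-path A is 3.21867e+08 s^-1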
reaction(
label = 'reaction11',
reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction12',
reactants = ['C=C([CH]C)C[C]=CC(24184)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction13',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CC=C1CCC1=CC(25269)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction14',
reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction15',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction16',
reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction21',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(=C)CC(24623)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction22',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C[CH][C]1CCC1=CC(25422)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction23',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction24',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(C)=CC(24616)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
    label = 'reaction25',
reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction26',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=CC)CC1C(25265)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction27',
reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction28',
reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction29',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(=C)CC(24630)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction30',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=C)C(C)C1C(25274)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
network(
label = '4267',
isomers = [
'C=C([CH]C)C(=C)[CH]C(24182)',
],
reactants = [
('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '4267',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
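# Illustrative notes on the pressureDependence block above (sketch, not input):
# the job is typically run with Arkane from an RMG-Py checkout, e.g.
#   python Arkane.py <this_file>.py
# and with interpolationModel = ('Chebyshev', 6, 4) each resulting k(T,P) is
# reported as a 6x4 Chebyshev fit over the reduced variables
#   Tred = (2/T - 1/Tmin - 1/Tmax) / (1/Tmax - 1/Tmin),
#   Pred = (2*log10(P) - log10(Pmin) - log10(Pmax)) / (log10(Pmax) - log10(Pmin)).
# A minimal evaluator for such a fit (the coefficient matrix `coeffs` is
# hypothetical Arkane output, not defined in this file):
import math
def chebyshev_k(T, P, coeffs, Tmin=300.0, Tmax=2000.0, Pmin=0.01, Pmax=100.0):
    """k(T,P) from an NxM Chebyshev fit; T in K, P in bar."""
    Tred = (2.0/T - 1.0/Tmin - 1.0/Tmax) / (1.0/Tmax - 1.0/Tmin)
    Pred = ((2.0*math.log10(P) - math.log10(Pmin) - math.log10(Pmax))
            / (math.log10(Pmax) - math.log10(Pmin)))
    logk = sum(coeffs[t][p]
               * math.cos(t * math.acos(Tred)) * math.cos(p * math.acos(Pred))
               for t in range(len(coeffs)) for p in range(len(coeffs[0])))
    return 10.0 ** logk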
E0=(304.572, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)'\n ))\nspecies(label='CH2(S)(23)', structure=SMILES('[CH2]'), E0=(419.862,\n 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([1369.36, 2789.41, \n 2993.36], 'cm^-1'))], spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(14.0266, 'amu'), collisionModel=TransportData(\n shapeIndex=2, epsilon=(1197.29, 'J/mol'), sigma=(3.8, 'angstroms'),\n dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),\n rotrelaxcollnum=0.0, comment='GRI-Mech'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.19195, -0.00230793, \n 8.0509e-06, -6.60123e-09, 1.95638e-12, 50484.3, -0.754589], Tmin=(200,\n 'K'), Tmax=(1000, 'K')), NASAPolynomial(coeffs=[2.28556, 0.00460255, -\n 1.97412e-06, 4.09548e-10, -3.34695e-14, 50922.4, 8.67684], Tmin=(1000,\n 'K'), Tmax=(3000, 'K'))], Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(\n 419.862, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013,\n 'J/(mol*K)'), label='CH2(S)', comment=\n 'Thermo library: Klippenstein_Glarborg2016'))\nspecies(label='[CH2]C(=C)C([CH2])=CC(25416)', structure=SMILES(\n '[CH2]C(=C)C([CH2])=CC'), E0=(285.713, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2950, 3100, 1380, 975, 1025, 1650, 2750, 2800, 2850, 1350, 1500, \n 750, 1050, 1375, 1000, 3000, 3033.33, 3066.67, 3100, 415, 465, 780, 850,\n 1435, 1475, 900, 1100, 3010, 987.5, 1337.5, 450, 1655, 311.383],\n 'cm^-1')), HinderedRotor(inertia=(0.327475, 'amu*angstrom^2'), symmetry\n =1, barrier=(22.5291, 'kJ/mol'), semiclassical=False), HinderedRotor(\n inertia=(0.327466, 'amu*angstrom^2'), symmetry=1, barrier=(22.5294,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327318,\n 'amu*angstrom^2'), symmetry=1, barrier=(22.5272, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.327483, 'amu*angstrom^2'\n ), symmetry=1, barrier=(22.5297, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(94.1543, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 0.335271, 0.0676667, -2.76626e-05, -1.62749e-08, 1.21982e-11, 34506.8, \n 24.024], Tmin=(100, 'K'), Tmax=(980.594, 'K')), NASAPolynomial(coeffs=[\n 17.5531, 0.0266059, -9.47854e-06, 1.70194e-09, -1.19937e-13, 29727.4, -\n 65.8563], Tmin=(980.594, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(285.713, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(390.78, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)'\n ))\nspecies(label='C=C([CH]C)C[C]=CC(24184)', structure=SMILES(\n '[CH2]C(=CC)C[C]=CC'), E0=(366.985, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([2995, 3025, 975, 1000, 1300, 1375, 400,\n 500, 1630, 1680, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, \n 1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 1685, 370, 350, 440,\n 
435, 1725, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100, 440, \n 815, 1455, 1000, 180, 579.702], 'cm^-1')), HinderedRotor(inertia=(\n 0.147406, 'amu*angstrom^2'), symmetry=1, barrier=(3.38916, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.64226, 'amu*angstrom^2'),\n symmetry=1, barrier=(14.7668, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64164, 'amu*angstrom^2'), symmetry=1, barrier=\n (14.7526, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.643937, 'amu*angstrom^2'), symmetry=1, barrier=(14.8054, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.145327, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.34136, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n collisionModel=TransportData(shapeIndex=2, epsilon=(3683.66, 'J/mol'),\n sigma=(6.4482, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [0.29648, 0.0786067, -5.42868e-05, 1.96375e-08, -2.97459e-12, 44273.2, \n 31.2372], Tmin=(100, 'K'), Tmax=(1490.43, 'K')), NASAPolynomial(coeffs=\n [13.9025, 0.0420909, -1.75363e-05, 3.199e-09, -2.17227e-13, 40217.5, -\n 39.8334], Tmin=(1490.43, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(366.985, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)'\n ))\nspecies(label='CC=C1CCC1=CC(25269)', structure=SMILES('CC=C1CCC1=CC'), E0=(\n 114.107, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.677799, 0.0585738, \n 5.80411e-06, -4.1598e-08, 1.78951e-11, 13856, 25.5085], Tmin=(100, 'K'),\n Tmax=(1034.79, 'K')), NASAPolynomial(coeffs=[13.4814, 0.0415234, -\n 1.65073e-05, 3.07348e-09, -2.16896e-13, 9469.28, -45.0922], Tmin=(\n 1034.79, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(114.107, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='CH2(19)', structure=SMILES('[CH2]'), E0=(381.563, 'kJ/mol'),\n modes=[HarmonicOscillator(frequencies=([1032.72, 2936.3, 3459], 'cm^-1'\n ))], spinMultiplicity=3, opticalIsomers=1, molecularWeight=(14.0266,\n 'amu'), collisionModel=TransportData(shapeIndex=2, epsilon=(1197.29,\n 'J/mol'), sigma=(3.8, 'angstroms'), dipoleMoment=(0, 'C*m'),\n polarizability=(0, 'angstroms^3'), rotrelaxcollnum=0.0, comment=\n 'GRI-Mech'), energyTransferModel=SingleExponentialDown(alpha0=(3.5886,\n 'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[\n NASAPolynomial(coeffs=[3.8328, 0.000224446, 4.68033e-06, -6.04743e-09, \n 2.59009e-12, 45920.8, 1.40666], Tmin=(200, 
'K'), Tmax=(1000, 'K')),\n NASAPolynomial(coeffs=[3.16229, 0.00281798, -7.56235e-07, 5.05446e-11, \n 5.65236e-15, 46099.1, 4.77656], Tmin=(1000, 'K'), Tmax=(3000, 'K'))],\n Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(381.563, 'kJ/mol'), Cp0=(33.2579,\n 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), label='CH2', comment=\n 'Thermo library: Klippenstein_Glarborg2016'))\nspecies(label='[CH2]C([C]=CC)=CC(25417)', structure=SMILES(\n '[CH2]C([C]=CC)=CC'), E0=(334.774, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([350, 440, 435, 1725, 1685, 370, 2750, 2770, 2790, 2810, \n 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400, \n 900, 1100, 3000, 3100, 440, 815, 1455, 1000, 2995, 3025, 975, 1000, \n 1300, 1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(\n inertia=(0.7606, 'amu*angstrom^2'), symmetry=1, barrier=(17.4877,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.760854,\n 'amu*angstrom^2'), symmetry=1, barrier=(17.4935, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.760586, 'amu*angstrom^2'\n ), symmetry=1, barrier=(17.4874, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(2.15146, 'amu*angstrom^2'), symmetry=1, barrier=\n (49.4663, 'kJ/mol'), semiclassical=False)], spinMultiplicity=3,\n opticalIsomers=1, molecularWeight=(94.1543, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.352604, 0.0734369,\n -5.91187e-05, 2.57941e-08, -4.60694e-12, 40400.9, 25.1788], Tmin=(100,\n 'K'), Tmax=(1327.42, 'K')), NASAPolynomial(coeffs=[14.2321, 0.0316126, \n -1.18565e-05, 2.05761e-09, -1.36512e-13, 36716.1, -45.7131], Tmin=(\n 1327.42, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(334.774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(390.78,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)'\n ))\nspecies(label='[CH2]C1([CH]C)C(=C)C1C(25296)', structure=SMILES(\n '[CH2]C1([CH]C)C(=C)C1C'), E0=(466.494, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.29276, 0.0655305, \n -4.50464e-06, -3.74661e-08, 1.7759e-11, 56253.7, 30.0992], Tmin=(100,\n 'K'), Tmax=(1027.4, 'K')), NASAPolynomial(coeffs=[16.6435, 0.0372633, -\n 1.49065e-05, 2.81296e-09, -2.01072e-13, 51026, -58.316], Tmin=(1027.4,\n 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(\n 466.494, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)'\n ))\nspecies(label='H(3)', structure=SMILES('[H]'), E0=(211.792, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(1.00794, 'amu'),\n collisionModel=TransportData(shapeIndex=0, epsilon=(1205.6, 'J/mol'),\n sigma=(2.05, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0.0, comment='GRI-Mech'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 
'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 2.5, 9.24385e-15, -1.3678e-17, 6.66185e-21, -1.00107e-24, 25472.7, -\n 0.459566], Tmin=(100, 'K'), Tmax=(3459.6, 'K')), NASAPolynomial(coeffs=\n [2.5, 9.20456e-12, -3.58608e-15, 6.15199e-19, -3.92042e-23, 25472.7, -\n 0.459566], Tmin=(3459.6, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(211.792, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'),\n CpInf=(20.7862, 'J/(mol*K)'), label='H', comment=\n 'Thermo library: BurkeH2O2'))\nspecies(label='[CH2]C(=CC)C(=C)C=C(24604)', structure=SMILES(\n '[CH2]C(=CC)C(=C)C=C'), E0=(242.677, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2950, 3000, 3050, 3100, 1330, 1430, 900, 1050, 1000, 1050, 1600, \n 1700, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 3000, 3100, \n 440, 815, 1455, 1000, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630,\n 1680, 181.962, 683.313], 'cm^-1')), HinderedRotor(inertia=(0.669842,\n 'amu*angstrom^2'), symmetry=1, barrier=(19.1337, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.0582339,\n 'amu*angstrom^2'), symmetry=1, barrier=(19.1767, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.83204, 'amu*angstrom^2'),\n symmetry=1, barrier=(19.1302, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.52237, 'amu*angstrom^2'), symmetry=1, barrier=\n (104.569, 'kJ/mol'), semiclassical=False)], spinMultiplicity=2,\n opticalIsomers=1, molecularWeight=(107.173, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.293043, 0.0682771,\n -2.00337e-05, -2.05401e-08, 1.21516e-11, 29332.3, 27.0261], Tmin=(100,\n 'K'), Tmax=(1018.57, 'K')), NASAPolynomial(coeffs=[15.7386, 0.0358123, \n -1.37404e-05, 2.51366e-09, -1.76142e-13, 24723.4, -54.9529], Tmin=(\n 1018.57, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(242.677, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(440.667,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)'\n ))\nspecies(label='[CH2]CC(=C)C([CH2])=CC(25418)', structure=SMILES(\n '[CH2]CC(=C)C([CH2])=CC'), E0=(316.814, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([3010, 987.5, 1337.5, 450, 1655, 2750, \n 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 2950, 3100, 1380, 975, \n 1025, 1650, 325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2850, \n 1437.5, 1250, 1305, 750, 350, 3000, 3033.33, 3066.67, 3100, 415, 465, \n 780, 850, 1435, 1475, 900, 1100, 180, 180], 'cm^-1')), HinderedRotor(\n inertia=(0.0368535, 'amu*angstrom^2'), symmetry=1, barrier=(17.9864,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00736317,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.60618, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.781153, 'amu*angstrom^2'\n ), symmetry=1, barrier=(17.9602, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.779478, 'amu*angstrom^2'), symmetry=1, barrier\n =(17.9217, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.781104, 'amu*angstrom^2'), symmetry=1, barrier=(17.9591, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 
'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925, 0.0836004, -\n 5.1879e-05, 7.14877e-09, 3.44908e-12, 38270.9, 31.5928], Tmin=(100, 'K'\n ), Tmax=(1044.14, 'K')), NASAPolynomial(coeffs=[17.9255, 0.0352115, -\n 1.34219e-05, 2.42456e-09, -1.67785e-13, 33276.3, -63.0036], Tmin=(\n 1044.14, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(316.814, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)'\n ))\nspecies(label='[CH]=C(CC)C([CH2])=CC(25419)', structure=SMILES(\n '[CH]=C(CC)C([CH2])=CC'), E0=(358.664, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([3120, 650, 792.5, 1650, 3010, 987.5, \n 1337.5, 450, 1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450,\n 1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 325, 375, 415, 465, \n 420, 450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, \n 3100, 440, 815, 1455, 1000, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.701639, 'amu*angstrom^2'), symmetry=1, barrier=(16.1321, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.344302, 'amu*angstrom^2'\n ), symmetry=1, barrier=(16.1602, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0492932, 'amu*angstrom^2'), symmetry=1,\n barrier=(16.1378, 'kJ/mol'), semiclassical=False), HinderedRotor(\n inertia=(0.702005, 'amu*angstrom^2'), symmetry=1, barrier=(16.1405,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.702379,\n 'amu*angstrom^2'), symmetry=1, barrier=(16.1491, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616, 0.0864938, -\n 5.84569e-05, 1.27697e-08, 1.75707e-12, 43308.4, 30.6389], Tmin=(100,\n 'K'), Tmax=(1047.28, 'K')), NASAPolynomial(coeffs=[18.4195, 0.034593, -\n 1.31104e-05, 2.35762e-09, -1.62637e-13, 38242.2, -66.6572], Tmin=(\n 1047.28, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(358.664, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)'\n ))\nspecies(label='[CH2]C(=[C]C)C(=C)CC(25420)', structure=SMILES(\n '[CH2]C(=[C]C)C(=C)CC'), E0=(349.41, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([1685, 370, 2750, 2770, 2790, 2810, \n 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400, \n 900, 1100, 2950, 3100, 1380, 975, 1025, 1650, 325, 375, 415, 465, 420, \n 450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100, \n 440, 815, 1455, 1000, 180, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.159905, 'amu*angstrom^2'), symmetry=1, barrier=(15.9368, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.693159, 'amu*angstrom^2'\n ), symmetry=1, barrier=(15.9371, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693127, 'amu*angstrom^2'), symmetry=1, barrier\n =(15.9364, 'kJ/mol'), semiclassical=False), 
HinderedRotor(inertia=(\n 0.693165, 'amu*angstrom^2'), symmetry=1, barrier=(15.9372, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.0150632,\n 'amu*angstrom^2'), symmetry=1, barrier=(15.9371, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231, 0.089245, -\n 7.16619e-05, 3.00631e-08, -5.07891e-12, 42198.9, 31.1306], Tmin=(100,\n 'K'), Tmax=(1412.15, 'K')), NASAPolynomial(coeffs=[19.0319, 0.0336833, \n -1.2643e-05, 2.20036e-09, -1.46165e-13, 36659.1, -70.2702], Tmin=(\n 1412.15, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(349.41, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'\n ))\nspecies(label='[CH]=C([CH]C)C(C)=CC(25421)', structure=SMILES(\n '[CH]C(=CC)C(C)=CC'), E0=(317.373, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2762.5, \n 2775, 2787.5, 2800, 2812.5, 2825, 2837.5, 2850, 1350, 1380, 1410, 1440,\n 1470, 1500, 700, 750, 800, 1000, 1050, 1100, 1350, 1375, 1400, 900, \n 1000, 1100, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630, 1680, \n 200, 800, 1200, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier\n =(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-\n 0.247945, 0.0873521, -6.16843e-05, 2.31486e-08, -3.62747e-12, 38328.8, \n 29.1665], Tmin=(100, 'K'), Tmax=(1460.93, 'K')), NASAPolynomial(coeffs=\n [15.297, 0.0447902, -1.7984e-05, 3.20673e-09, -2.14924e-13, 33786.8, -\n 51.7212], Tmin=(1460.93, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(317.373, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)'\n ))\nspecies(label='[CH2][C](C=C)C(=C)CC(24623)', structure=SMILES(\n '[CH2]C(C=C)=C([CH2])CC'), E0=(228.159, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728, \n 0.0733281, -1.6094e-05, -3.35123e-08, 1.88363e-11, 27601.1, 30.4448],\n Tmin=(100, 'K'), Tmax=(975.095, 'K')), 
NASAPolynomial(coeffs=[18.3695, \n 0.0342638, -1.21408e-05, 2.16747e-09, -1.52112e-13, 22274, -66.8493],\n Tmin=(975.095, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,\n 'K'), E0=(228.159, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)'\n ))\nspecies(label='C[CH][C]1CCC1=CC(25422)', structure=SMILES(\n 'C[CH]C1CCC=1[CH]C'), E0=(303.292, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.788866, 0.0500701,\n 4.22235e-05, -8.64809e-08, 3.53174e-11, 36611.5, 25.2586], Tmin=(100,\n 'K'), Tmax=(987.239, 'K')), NASAPolynomial(coeffs=[16.2187, 0.0373502, \n -1.4111e-05, 2.65357e-09, -1.92503e-13, 31138.2, -61.2734], Tmin=(\n 987.239, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(303.292, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)'\n ))\nspecies(label='[CH2][C]1C(=C)C(C)C1C(25423)', structure=SMILES(\n '[CH2]C1=C([CH2])C(C)C1C'), E0=(305.852, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.377097, 0.0563026,\n 3.9705e-05, -9.53284e-08, 4.14811e-11, 36937, 26.2973], Tmin=(100, 'K'),\n Tmax=(959.735, 'K')), NASAPolynomial(coeffs=[20.4056, 0.0304853, -\n 1.006e-05, 1.83774e-09, -1.35603e-13, 30437.2, -83.3398], Tmin=(959.735,\n 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(\n 305.852, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)'\n ))\nspecies(label='C=CC(=C)C(C)=CC(24616)', structure=SMILES('C=CC(=C)C(C)=CC'),\n E0=(91.1774, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.236638, 0.0713806, -\n 3.04205e-05, -5.26762e-09, 5.54498e-12, 11111.2, 26.9518], Tmin=(100,\n 'K'), Tmax=(1093.32, 'K')), NASAPolynomial(coeffs=[14.1536, 0.040705, -\n 1.6104e-05, 2.93544e-09, -2.02595e-13, 6858.32, -46.9636], Tmin=(\n 1093.32, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(91.1774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)'\n 
))\nspecies(label='C=[C]C(C)C(=C)[CH]C(24183)', structure=SMILES(\n '[CH2]C(=CC)C(C)[C]=C'), E0=(369.44, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([1685, 370, 3010, 987.5, 1337.5, 450, \n 1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, \n 800, 1000, 1100, 1350, 1400, 900, 1100, 2950, 3100, 1380, 975, 1025, \n 1650, 1380, 1390, 370, 380, 2900, 435, 350, 440, 435, 1725, 3000, 3100,\n 440, 815, 1455, 1000, 345.333, 347.343], 'cm^-1')), HinderedRotor(\n inertia=(0.119405, 'amu*angstrom^2'), symmetry=1, barrier=(9.93037,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.281457,\n 'amu*angstrom^2'), symmetry=1, barrier=(24.022, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.116909, 'amu*angstrom^2'\n ), symmetry=1, barrier=(9.94809, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.117447, 'amu*angstrom^2'), symmetry=1, barrier\n =(9.9744, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.116555, 'amu*angstrom^2'), symmetry=1, barrier=(9.93684, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), collisionModel=TransportData(\n shapeIndex=2, epsilon=(3625.33, 'J/mol'), sigma=(6.4092, 'angstroms'),\n dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),\n rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [0.299693, 0.0839308, -6.74533e-05, 3.06742e-08, -6.02582e-12, 44564.4,\n 29.0122], Tmin=(100, 'K'), Tmax=(1163.73, 'K')), NASAPolynomial(coeffs=\n [10.857, 0.0476425, -2.06788e-05, 3.8782e-09, -2.69295e-13, 42107.3, -\n 23.5217], Tmin=(1163.73, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(369.44, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'\n ))\nspecies(label='C=C1C(=CC)CC1C(25265)', structure=SMILES('C=C1C(=CC)CC1C'),\n E0=(118.381, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.689924, 0.0550304, \n 2.3689e-05, -6.56265e-08, 2.77602e-11, 14372.8, 24.9628], Tmin=(100,\n 'K'), Tmax=(993.204, 'K')), NASAPolynomial(coeffs=[15.3775, 0.0380508, \n -1.43595e-05, 2.66472e-09, -1.90565e-13, 9375.16, -56.2678], Tmin=(\n 993.204, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(118.381, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='CHCH3(T)(95)', structure=SMILES('[CH]C'), E0=(343.893,\n 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([2750, 2800, 2850, \n 1350, 1500, 750, 1050, 1375, 1000, 592.414, 4000], 'cm^-1')),\n HinderedRotor(inertia=(0.00438699, 'amu*angstrom^2'), symmetry=1,\n barrier=(26.7685, 'kJ/mol'), semiclassical=False)], 
spinMultiplicity=3,\n opticalIsomers=1, molecularWeight=(28.0532, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.82363, -\n 0.000909515, 3.2138e-05, -3.7348e-08, 1.3309e-11, 41371.4, 7.10948],\n Tmin=(100, 'K'), Tmax=(960.812, 'K')), NASAPolynomial(coeffs=[4.30487, \n 0.00943069, -3.27559e-06, 5.95121e-10, -4.27307e-14, 40709.1, 1.84202],\n Tmin=(960.812, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,\n 'K'), E0=(343.893, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 128.874, 'J/(mol*K)'), label='CHCH3(T)', comment=\n 'Thermo library: DFT_QCI_thermo'))\nspecies(label='[CH2]C([C]=C)=CC(24774)', structure=SMILES(\n '[CH2]C([C]=C)=CC'), E0=(370.8, 'kJ/mol'), modes=[HarmonicOscillator(\n frequencies=([1685, 370, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375,\n 1000, 3010, 987.5, 1337.5, 450, 1655, 2950, 3100, 1380, 975, 1025, 1650,\n 350, 440, 435, 1725, 3000, 3100, 440, 815, 1455, 1000, 180], 'cm^-1')),\n HinderedRotor(inertia=(1.17315, 'amu*angstrom^2'), symmetry=1, barrier=\n (26.9731, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 1.17496, 'amu*angstrom^2'), symmetry=1, barrier=(27.0146, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(1.1727, 'amu*angstrom^2'),\n symmetry=1, barrier=(26.9626, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(80.1277, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 1.0818, 0.0569416, -3.56598e-05, 4.1841e-09, 3.20998e-12, 44708.4, \n 20.7527], Tmin=(100, 'K'), Tmax=(982.69, 'K')), NASAPolynomial(coeffs=[\n 12.9204, 0.0239405, -8.46845e-06, 1.46434e-09, -9.91425e-14, 41648.3, -\n 39.886], Tmin=(982.69, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=\n (5000, 'K'), E0=(370.8, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 320.107, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)'\n ))\nspecies(label='[CH]=C([CH]C)C(=C)CC(25424)', structure=SMILES(\n '[CH]C(=CC)C(=C)CC'), E0=(330.753, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([2750, 2850, 1437.5, 1250, 1305, 750, 350, 2950, 3100, \n 1380, 975, 1025, 1650, 3010, 987.5, 1337.5, 450, 1655, 2750, 2770, 2790,\n 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, \n 1400, 900, 1100, 325, 375, 415, 465, 420, 450, 1700, 1750, 200, 800, \n 1066.67, 1333.33, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier\n =(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-\n 0.442166, 0.0858934, -5.1432e-05, 
9.5936e-09, 1.54315e-12, 39950.3, \n 30.9724], Tmin=(100, 'K'), Tmax=(1106.5, 'K')), NASAPolynomial(coeffs=[\n 16.3579, 0.0427111, -1.66841e-05, 2.99222e-09, -2.04007e-13, 35158.1, -\n 56.633], Tmin=(1106.5, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=\n (5000, 'K'), E0=(330.753, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=\n (461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)'\n ))\nspecies(label='C=CC(=C)C(=C)CC(24630)', structure=SMILES('C=CC(=C)C(=C)CC'),\n E0=(104.558, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.296747, 0.0670054, -\n 1.0269e-05, -3.13536e-08, 1.59568e-11, 12721.3, 27.8384], Tmin=(100,\n 'K'), Tmax=(1010.3, 'K')), NASAPolynomial(coeffs=[15.6889, 0.0379462, -\n 1.44599e-05, 2.64736e-09, -1.86033e-13, 7984.11, -54.6302], Tmin=(\n 1010.3, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0\n =(104.558, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)'\n ))\nspecies(label='C=C1C(=C)C(C)C1C(25274)', structure=SMILES(\n 'C=C1C(=C)C(C)C1C'), E0=(122.654, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.691732, 0.0515838,\n 4.13669e-05, -8.96066e-08, 3.77135e-11, 14890, 23.0693], Tmin=(100, 'K'\n ), Tmax=(969.873, 'K')), NASAPolynomial(coeffs=[17.4573, 0.0342784, -\n 1.20439e-05, 2.21718e-09, -1.61071e-13, 9199.74, -69.8715], Tmin=(\n 969.873, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(122.654, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='N2', structure=SMILES('N#N'), E0=(-8.69489, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(28.0135, 'amu'),\n collisionModel=TransportData(shapeIndex=1, epsilon=(810.913, 'J/mol'),\n sigma=(3.621, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(\n 1.76, 'angstroms^3'), rotrelaxcollnum=4.0, comment=\n 'PrimaryTransportLibrary'), energyTransferModel=SingleExponentialDown(\n alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(\n polynomials=[NASAPolynomial(coeffs=[3.61263, -0.00100893, 2.49898e-06, \n -1.43376e-09, 2.58636e-13, -1051.1, 2.6527], Tmin=(100, 'K'), Tmax=(\n 1817.04, 'K')), NASAPolynomial(coeffs=[2.9759, 0.00164141, -7.19722e-07,\n 1.25378e-10, -7.91526e-15, -1025.84, 5.53757], Tmin=(1817.04, 'K'),\n Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(-8.69489,\n 'kJ/mol'), Cp0=(29.1007, 'J/(mol*K)'), CpInf=(37.4151, 'J/(mol*K)'),\n label='N2', comment='Thermo library: 
BurkeH2O2'))\nspecies(label='Ne', structure=SMILES('[Ne]'), E0=(-6.19738, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(20.1797, 'amu'),\n collisionModel=TransportData(shapeIndex=0, epsilon=(1235.53, 'J/mol'),\n sigma=(3.758e-10, 'm'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(200, 'K'), Tmax=(1000, 'K')\n ), NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(\n 1000, 'K'), Tmax=(6000, 'K'))], Tmin=(200, 'K'), Tmax=(6000, 'K'), E0=(\n -6.19738, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862,\n 'J/(mol*K)'), label='Ne', comment='Thermo library: primaryThermoLibrary'))\ntransitionState(label='TS1', E0=(291.23, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS2', E0=(462.221, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS3', E0=(538.699, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS4', E0=(497.951, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS5', E0=(380.338, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS6', E0=(399.474, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS7', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS8', E0=(722.113, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS9', E0=(343.259, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS10', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS11', E0=(705.575, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS12', E0=(537.022, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS13', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS14', E0=(716.337, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS15', E0=(466.494, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS16', E0=(454.469, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS17', E0=(430.619, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS18', E0=(503.849, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS19', E0=(393.718, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS20', E0=(361.682, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS21', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS22', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS23', E0=(375.044, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS24', E0=(274.66, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS25', E0=(463.915, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS26', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS27', E0=(714.692, 'kJ/mol'), spinMultiplicity=1,\n 
opticalIsomers=1)\ntransitionState(label='TS28', E0=(375.062, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS29', E0=(258.055, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS30', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\nreaction(label='reaction1', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'], transitionState=\n 'TS1', kinetics=Arrhenius(A=(5000000000000.0, 's^-1'), n=0, Ea=(41.5431,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"Exact match found for rate rule [RJJ]\nEuclidian distance = 0\nfamily: 1,4_Linear_birad_scission\nEa raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction2', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C1([CH]C)CC1=CC(25275)'], transitionState='TS2',\n kinetics=Arrhenius(A=(3360000000.0, 's^-1'), n=0.84, Ea=(212.534,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2500, 'K'), comment=\n \"\"\"Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction3', reactants=['CH3CHCCH2(18175)',\n 'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS3', kinetics=Arrhenius(A=(0.00086947, 'm^3/(mol*s)'),\n n=2.67356, Ea=(32.0272, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [Ca_Cds-HH;CJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\"\"\"\n ))\nreaction(label='reaction4', reactants=['[CH2]C(=CC)C(C)=[C]C(25412)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS4',\n kinetics=Arrhenius(A=(7740000000.0, 's^-1'), n=1.08, Ea=(161.921,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H\nExact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction5', reactants=['[CH2]C(=[C]C)C(C)=CC(25413)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS5',\n kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction6', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C(=CC)[C](C)C=C(24605)'], transitionState='TS6',\n kinetics=Arrhenius(A=(1600000.0, 's^-1'), n=1.81, Ea=(149.787, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction7', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C](C=C)C(C)=CC(24606)'], transitionState='TS7',\n kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 96 used 
for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction8', reactants=['C=[C][CH]C(18176)',\n 'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS8', kinetics=Arrhenius(A=(3730380.0, 'm^3/(mol*s)'),\n n=0.027223, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction9', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C(=CC)[C]1CC1C(25414)'], transitionState='TS9',\n kinetics=Arrhenius(A=(7367860000000.0, 's^-1'), n=-0.105173, Ea=(\n 93.5715, 'kJ/mol'), T0=(1, 'K'), Tmin=(303.03, 'K'), Tmax=(2000, 'K'),\n comment=\n \"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction10', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=CC)CC1C(25415)'], transitionState='TS10',\n kinetics=Arrhenius(A=(643734000.0, 's^-1'), n=0.926191, Ea=(130.445,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction11', reactants=['CH2(S)(23)',\n '[CH2]C(=C)C([CH2])=CC(25416)'], products=[\n 'C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS11', kinetics=\n Arrhenius(A=(79400000000000.0, 'cm^3/(mol*s)', '*|/', 0.25), n=-0.324,\n Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C[C]=CC(24184)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS12',\n kinetics=Arrhenius(A=(1748420000.0, 's^-1'), n=1.084, Ea=(170.038,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction13', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['CC=C1CCC1=CC(25269)'], transitionState='TS13', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction14', reactants=['CH2(19)',\n '[CH2]C([C]=CC)=CC(25417)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS14', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction15', 
reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C1([CH]C)C(=C)C1C(25296)'], transitionState='TS15',\n kinetics=Arrhenius(A=(67265800000.0, 's^-1'), n=0.535608, Ea=(216.807,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction16', reactants=['H(3)',\n '[CH2]C(=CC)C(=C)C=C(24604)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS16', kinetics=Arrhenius(A=(231000000.0,\n 'cm^3/(mol*s)'), n=1.64, Ea=(0, 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'),\n Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction17', reactants=['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS17',\n kinetics=Arrhenius(A=(1720000.0, 's^-1'), n=1.99, Ea=(113.805, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction18', reactants=['[CH]=C(CC)C([CH2])=CC(25419)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS18',\n kinetics=Arrhenius(A=(18460000000.0, 's^-1'), n=0.74, Ea=(145.185,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction19', reactants=['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS19',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction20', reactants=['[CH]=C([CH]C)C(C)=CC(25421)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS20',\n kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction21', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C](C=C)C(=C)CC(24623)'], transitionState='TS21',\n kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian distance = 2.0\nMultiplied by 
reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction22', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C[CH][C]1CCC1=CC(25422)'], transitionState='TS22', kinetics=\n Arrhenius(A=(321867000.0, 's^-1'), n=0.926191, Ea=(130.445, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=C)C(C)C1C(25423)'], transitionState='TS23',\n kinetics=Arrhenius(A=(516207000.0, 's^-1'), n=0.911389, Ea=(125.357,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(C)=CC(24616)'], transitionState='TS24', kinetics=\n Arrhenius(A=(12756600000.0, 's^-1'), n=0.137, Ea=(24.9733, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=[C]C(C)C(=C)[CH]C(24183)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS25',\n kinetics=Arrhenius(A=(866000000000.0, 's^-1'), n=0.438, Ea=(94.4747,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction26', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=CC)CC1C(25265)'], transitionState='TS26', kinetics=\n Arrhenius(A=(3240000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction27', reactants=['CHCH3(T)(95)',\n '[CH2]C([C]=C)=CC(24774)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS27', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction28', reactants=['[CH]=C([CH]C)C(=C)CC(25424)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS28',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction29', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(=C)CC(24630)'], transitionState='TS29', kinetics=\n Arrhenius(A=(19260000000.0, 's^-1'), n=0.137, Ea=(8.368, 'kJ/mol'), T0=\n (1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule 
[R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction30', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=C)C(C)C1C(25274)'], transitionState='TS30', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"\n ))\nnetwork(label='4267', isomers=['C=C([CH]C)C(=C)[CH]C(24182)'], reactants=[(\n 'CH3CHCCH2(18175)', 'CH3CHCCH2(18175)')], bathGas={'N2': 0.5, 'Ne': 0.5})\npressureDependence(label='4267', Tmin=(300, 'K'), Tmax=(2000, 'K'), Tcount=\n 8, Tlist=([302.47, 323.145, 369.86, 455.987, 609.649, 885.262, 1353.64,\n 1896.74], 'K'), Pmin=(0.01, 'bar'), Pmax=(100, 'bar'), Pcount=5, Plist=\n ([0.0125282, 0.0667467, 1, 14.982, 79.8202], 'bar'), maximumGrainSize=(\n 0.5, 'kcal/mol'), minimumGrainCount=250, method=\n 'modified strong collision', interpolationModel=('Chebyshev', 6, 4),\n activeKRotor=True, activeJRotor=True, rmgmode=True)\n",
"step-3": "species(\n label = 'C=C([CH]C)C(=C)[CH]C(24182)',\n structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),\n E0 = (249.687,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CH3CHCCH2(18175)',\n structure = SMILES('C=C=CC'),\n E0 = (145.615,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label=\"\"\"CH3CHCCH2\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)CC1=CC(25275)',\n structure = SMILES('[CH2]C1([CH]C)CC1=CC'),\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'C=[C][CH]C(18176)',\n structure = SMILES('[CH2][C]=CC'),\n E0 = (361.056,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment=\"\"\"Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(C)=[C]C(25412)',\n structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: 
group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(C)=CC(25413)',\n structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C](C)C=C(24605)',\n structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),\n E0 = (216.244,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), 
NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(C)=CC(24606)',\n structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),\n E0 = (216.244,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C]1CC1C(25414)',\n structure = SMILES('[CH2]C(=CC)[C]1CC1C'),\n E0 = (289.9,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=CC)CC1C(25415)',\n structure = SMILES('[CH2]C1=C([CH]C)CC1C'),\n E0 = (304.572,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + 
radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = 'CH2(S)(23)',\n structure = SMILES('[CH2]'),\n E0 = (419.862,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),\n ],\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2(S)\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=C)C([CH2])=CC(25416)',\n structure = SMILES('[CH2]C(=C)C([CH2])=CC'),\n E0 = (285.713,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),\n HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=C([CH]C)C[C]=CC(24184)',\n structure = SMILES('[CH2]C(=CC)C[C]=CC'),\n E0 = (366.985,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),\n HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), 
semiclassical=False),\n HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CC=C1CCC1=CC(25269)',\n structure = SMILES('CC=C1CCC1=CC'),\n E0 = (114.107,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CH2(19)',\n structure = SMILES('[CH2]'),\n E0 = (381.563,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=CC)=CC(25417)',\n structure = SMILES('[CH2]C([C]=CC)=CC'),\n E0 = 
(334.774,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)C(=C)C1C(25296)',\n structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'H(3)',\n structure = SMILES('[H]'),\n E0 = (211.792,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (1.00794,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"H\"\"\", comment=\"\"\"Thermo library: 
BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(=C)C=C(24604)',\n structure = SMILES('[CH2]C(=CC)C(=C)C=C'),\n E0 = (242.677,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),\n HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 2,\n opticalIsomers = 1,\n molecularWeight = (107.173,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]CC(=C)C([CH2])=CC(25418)',\n structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),\n E0 = (316.814,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + 
group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C(CC)C([CH2])=CC(25419)',\n structure = SMILES('[CH]=C(CC)C([CH2])=CC'),\n E0 = (358.664,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(=C)CC(25420)',\n structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),\n E0 = (349.41,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), 
CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(C)=CC(25421)',\n structure = SMILES('[CH]C(=CC)C(C)=CC'),\n E0 = (317.373,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(=C)CC(24623)',\n structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),\n E0 = (228.159,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C[CH][C]1CCC1=CC(25422)',\n structure = SMILES('C[CH]C1CCC=1[CH]C'),\n E0 = (303.292,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=C)C(C)C1C(25423)',\n structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),\n E0 = (305.852,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(C)=CC(24616)',\n structure = SMILES('C=CC(=C)C(C)=CC'),\n E0 = (91.1774,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=[C]C(C)C(=C)[CH]C(24183)',\n structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),\n E0 = (369.44,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),\n HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),\n 
HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=CC)CC1C(25265)',\n structure = SMILES('C=C1C(=CC)CC1C'),\n E0 = (118.381,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CHCH3(T)(95)',\n structure = SMILES('[CH]C'),\n E0 = (343.893,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),\n HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (28.0532,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label=\"\"\"CHCH3(T)\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=C)=CC(24774)',\n structure = SMILES('[CH2]C([C]=C)=CC'),\n E0 = (370.8,'kJ/mol'),\n modes = [\n 
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (80.1277,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(=C)CC(25424)',\n structure = SMILES('[CH]C(=CC)C(=C)CC'),\n E0 = (330.753,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(=C)CC(24630)',\n structure = SMILES('C=CC(=C)C(=C)CC'),\n E0 = (104.558,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=C)C(C)C1C(25274)',\n structure = SMILES('C=C1C(=C)C(C)C1C'),\n E0 = (122.654,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'N2',\n structure = SMILES('N#N'),\n E0 = (-8.69489,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (28.0135,'amu'),\n collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment=\"\"\"PrimaryTransportLibrary\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label=\"\"\"N2\"\"\", comment=\"\"\"Thermo library: BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = 'Ne',\n structure = SMILES('[Ne]'),\n E0 = (-6.19738,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (20.1797,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! 
Try improving transport databases!\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"Ne\"\"\", comment=\"\"\"Thermo library: primaryThermoLibrary\"\"\"),\n)\n\ntransitionState(\n label = 'TS1',\n E0 = (291.23,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS2',\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS3',\n E0 = (538.699,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS4',\n E0 = (497.951,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS5',\n E0 = (380.338,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS6',\n E0 = (399.474,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS7',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS8',\n E0 = (722.113,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS9',\n E0 = (343.259,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS10',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS11',\n E0 = (705.575,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS12',\n E0 = (537.022,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS13',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS14',\n E0 = (716.337,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS15',\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS16',\n E0 = (454.469,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS17',\n E0 = (430.619,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS18',\n E0 = (503.849,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS19',\n E0 = (393.718,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS20',\n E0 = (361.682,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS21',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS22',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS23',\n E0 = (375.044,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS24',\n E0 = (274.66,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS25',\n E0 = (463.915,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS26',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS27',\n E0 = 
(714.692,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS28',\n E0 = (375.062,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS29',\n E0 = (258.055,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS30',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\nreaction(\n label = 'reaction1',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],\n transitionState = 'TS1',\n kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Exact match found for rate rule [RJJ]\nEuclidian distance = 0\nfamily: 1,4_Linear_birad_scission\nEa raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction2',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)CC1=CC(25275)'],\n transitionState = 'TS2',\n kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment=\"\"\"Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction3',\n reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS3',\n kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Ca_Cds-HH;CJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\"\"\"),\n)\n\nreaction(\n label = 'reaction4',\n reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS4',\n kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H\nExact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction5',\n reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS5',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction6',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C](C)C=C(24605)'],\n transitionState = 'TS6',\n kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction7',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(C)=CC(24606)'],\n 
transitionState = 'TS7',\n kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction8',\n reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS8',\n kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction9',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C]1CC1C(25414)'],\n transitionState = 'TS9',\n kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction10',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=CC)CC1C(25415)'],\n transitionState = 'TS10',\n kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction11',\n reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS11',\n kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C[C]=CC(24184)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS12',\n kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction13',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CC=C1CCC1=CC(25269)'],\n transitionState = 'TS13',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction14',\n reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS14',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template 
[Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction15',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],\n transitionState = 'TS15',\n kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction16',\n reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS16',\n kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction17',\n reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS17',\n kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction18',\n reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS18',\n kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction19',\n reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS19',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction20',\n reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS20',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction21',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(=C)CC(24623)'],\n transitionState = 'TS21',\n kinetics = 
Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction22',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C[CH][C]1CCC1=CC(25422)'],\n transitionState = 'TS22',\n kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],\n transitionState = 'TS23',\n kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(C)=CC(24616)'],\n transitionState = 'TS24',\n kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS25',\n kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction26',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=CC)CC1C(25265)'],\n transitionState = 'TS26',\n kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction27',\n reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS27',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction28',\n reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS28',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction29',\n reactants = 
['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(=C)CC(24630)'],\n transitionState = 'TS29',\n kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction30',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=C)C(C)C1C(25274)'],\n transitionState = 'TS30',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"),\n)\n\nnetwork(\n label = '4267',\n isomers = [\n 'C=C([CH]C)C(=C)[CH]C(24182)',\n ],\n reactants = [\n ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),\n ],\n bathGas = {\n 'N2': 0.5,\n 'Ne': 0.5,\n },\n)\n\npressureDependence(\n label = '4267',\n Tmin = (300,'K'),\n Tmax = (2000,'K'),\n Tcount = 8,\n Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),\n Pmin = (0.01,'bar'),\n Pmax = (100,'bar'),\n Pcount = 5,\n Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),\n maximumGrainSize = (0.5,'kcal/mol'),\n minimumGrainCount = 250,\n method = 'modified strong collision',\n interpolationModel = ('Chebyshev', 6, 4),\n activeKRotor = True,\n activeJRotor = True,\n rmgmode = True,\n)\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
from handlers.base import Base
class Home(Base):
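    # Home page handler: wires up the database and exposes page metadata, latest articles, ads, and featured posts to the view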
def start(self):
from movuca import DataBase, User
from datamodel.article import Article, ContentType, Category
from datamodel.ads import Ads
self.db = DataBase([User, ContentType, Category, Article, Ads])
def pre_render(self):
        # it is required to have a config and a self.response/self.request that provide a render (self.response.render)
self.response = self.db.response
self.request = self.db.request
self.config = self.db.config
#self.view = "app/home.html"
self.response.meta.title = self.db.config.meta.title
self.response.meta.description = self.db.config.meta.description
self.response.meta.keywords = self.db.config.meta.keywords
self.context.use_facebook = self.db.config.auth.use_facebook
def last_articles(self):
from helpers.article import latest_articles
self.context.latest_articles = latest_articles(self.db)
def ads(self):
self.context.ads = self.db(self.db.Ads.place == "top_slider").select(limitby=(0, 5), orderby="<random>")
if not self.context.ads:
from gluon.storage import Storage
self.context.ads = [Storage(id=1, thumbnail='', link=self.db.CURL('contact', 'ads')),
Storage(id=2, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your add here!"), link=self.db.CURL('contact', 'ads')),
Storage(id=3, thumbnail="http://placekitten.com/250/220", link=self.db.CURL('contact', 'ads')),
Storage(id=3, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your Logo"), link=self.db.CURL('contact', 'ads'))
]
def featured(self):
self.context.featured = self.db(self.db.Article.featured == True).select(limitby=(0, 4), orderby="<random>")
if not self.context.featured:
self.context.featured = self.db(self.db.Article).select(limitby=(0, 4), orderby=~self.db.Article.likes)
|
normal
|
{
"blob_id": "9d0d4707cc9a654752dd0b98fe0fec6a0c1419a1",
"index": 3029,
"step-1": "<mask token>\n\n\nclass Home(Base):\n <mask token>\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n <mask token>\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-2": "<mask token>\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n <mask token>\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-3": "<mask token>\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-4": "from handlers.base import Base\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom handlers.base import Base\n\n\nclass Home(Base):\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n # obrigatorio ter um config, um self.response|request, que tenha um render self.response.render\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n #self.view = \"app/home.html\"\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == \"top_slider\").select(limitby=(0, 5), orderby=\"<random>\")\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.CURL('contact', 'ads')),\n Storage(id=2, thumbnail=\"http://placehold.it/250x220&text=%s\" % self.db.T(\"Your add here!\"), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail=\"http://placekitten.com/250/220\", link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail=\"http://placehold.it/250x220&text=%s\" % self.db.T(\"Your Logo\"), link=self.db.CURL('contact', 'ads'))\n ]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True).select(limitby=(0, 4), orderby=\"<random>\")\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby=(0, 4), orderby=~self.db.Article.likes)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from os import listdir
from os.path import isfile, join
from datetime import date
mypath = '/Users/kachunfung/python/codewars/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
py_removed = [i.replace('.py','') for i in onlyfiles]
py_removed.remove('file_counter')  # list.remove mutates in place and returns None, so there is nothing to assign
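# each kata lives in a 'dayN.py' file, so the largest N is the most recent day completed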
day_removed = max([int(j.replace('day','')) for j in py_removed])
d0 = date(2016, 11, 7)
d1 = date.today()
delta = d1 - d0
if day_removed >= delta.days:
print "Well done!\nYou are %s days ahead.\nKeep up the good work! I am proud of you." % (day_removed - delta.days)
else:
print "You are %s days behind schedule.\nTry your best and Never give up!" % (delta.days - day_removed)
print "\nYou have completed %s codewars kata since 7th December 2016" % day_removed
|
normal
|
{
"blob_id": "592d5074eeca74a5845d26ee2ca6aba8c3d0f989",
"index": 8929,
"step-1": "from os import listdir\nfrom os.path import isfile, join\nfrom datetime import date\n\nmypath = '/Users/kachunfung/python/codewars/'\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\npy_removed = [i.replace('.py','') for i in onlyfiles]\nfile_counter_removed = py_removed.remove('file_counter')\nday_removed = max([int(j.replace('day','')) for j in py_removed])\n\nd0 = date(2016, 11, 7)\nd1 = date.today()\ndelta = d1 - d0\n\nif day_removed >= delta.days:\n print \"Well done!\\nYou are %s days ahead.\\nKeep up the good work! I am proud of you.\" % (day_removed - delta.days)\nelse:\n print \"You are %s days behind schedule.\\nTry your best and Never give up!\" % (delta.days - day_removed)\nprint \"\\nYou have completed %s codewars kata since 7th December 2016\" % day_removed\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask, request, render_template, redirect
from stories import Story, stories
# from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# app.config['SECRET_KEY'] = "secret"
# debug = DebugToolbarExtension(app)
# my original approach used a global story variable to store the instances defined in this file
# After looking at the answer code, storing this data on the instance makes more sense
# story_global = None
@app.route('/')
def home_page():
"""Offer user choice of Madlib Games"""
return render_template('index.html', stories=stories.values())
@app.route('/form')
def show_form():
"""Show Form for User Input"""
story_title = request.args["madlib"]
for story in stories.values():
if story.title == story_title:
story_for_form = story
return render_template('form.html', s=story_for_form, story_title=story_title)
@app.route("/story")
def show_story():
"""Display Madlib Story"""
answers = request.args
story_title = request.args["story_title"]
for story in stories.values():
if story.title == story_title:
story_to_gen = story
return render_template("story.html", story_to_gen=story_to_gen, user_answers=answers)
@app.route('/play-again')
def play_again():
"""Redirect Home"""
return redirect('/')
|
normal
|
{
"blob_id": "08ed57ffb7a83973059d62f686f77b1bea136fbd",
"index": 3828,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\n<mask token>\n\n\[email protected]('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\[email protected]('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\[email protected]('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\[email protected]('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\[email protected]('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\[email protected]('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\[email protected]('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-4": "from flask import Flask, request, render_template, redirect\nfrom stories import Story, stories\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\[email protected]('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\[email protected]('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\[email protected]('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-5": "from flask import Flask, request, render_template, redirect\nfrom stories import Story, stories\n# from flask_debugtoolbar import DebugToolbarExtension\n\napp = Flask(__name__)\t\n# app.config['SECRET_KEY'] = \"secret\"\n\n# debug = DebugToolbarExtension(app)\n\n\n# my original approach involved using a global story variable to store the instances which were in this file\n# After looking at the answer code, storing this data in the instance maskes more sense\n# story_global = None\n\[email protected]('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n\n return render_template('index.html', stories=stories.values())\n\n\[email protected]('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n\n story_title = request.args[\"madlib\"]\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n \n return render_template('form.html', s=story_for_form, story_title=story_title)\n\n\[email protected](\"/story\")\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n\n answers = request.args\n story_title = request.args[\"story_title\"]\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n \n return render_template(\"story.html\", story_to_gen=story_to_gen, user_answers=answers)\n\n\[email protected]('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n\n return redirect('/')\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
"""
corner cases like:
word[!?',;.]
word[!?',;.]word[!?',;.]word
so don't assume punctuation only appears after a single word followed by whitespace
use re for regular-expression matching:
replace the punctuation, then split into words
"""
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
# def filterpunc(word):
# word = word.lower()
# for p in "!?',;.":
# word = word.strip(p)
# if word in banned:
# return ''
# return word
# from collections import Counter
# banned = set(banned)
# words = paragraph.strip().split()
# # words = list(filter(lambda x: not any(map(lambda y: y in x, list("!?',;."))), words))
# words = list(filter(lambda x: x not in "!?',;.", words))
# words = map(filterpunc, words)
# words = filter(None, words)
# return Counter(words).most_common(1)[0][0]
import re
from collections import Counter
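        # replace every non-word character with a space so split() yields clean tokens (a raw string avoids the invalid '\W' escape warning)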
        paragraph = re.sub(r'[\W]', ' ', paragraph)
words = paragraph.strip().split()
words = map(str.lower, words)
cnt = Counter(words)
for word,_ in cnt.most_common():
if word not in banned:
return word
if __name__ == "__main__":
# paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
# banned = ["hit"]
# paragraph = "Bob. hIt, baLl"
# banned = ["bob", "hit"]
paragraph = "a, a, a, a, b,b,b,c, c"
banned = ['a']
sol = Solution()
print(sol.mostCommonWord(paragraph, banned))
|
normal
|
{
"blob_id": "3bb50b61c7a3e98ede0a31e574f39b4ea7f22de5",
"index": 9197,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n import re\n from collections import Counter\n paragraph = re.sub('[\\\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word, _ in cnt.most_common():\n if word not in banned:\n return word\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n import re\n from collections import Counter\n paragraph = re.sub('[\\\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word, _ in cnt.most_common():\n if word not in banned:\n return word\n\n\nif __name__ == '__main__':\n paragraph = 'a, a, a, a, b,b,b,c, c'\n banned = ['a']\n sol = Solution()\n print(sol.mostCommonWord(paragraph, banned))\n",
"step-5": "\"\"\"\ncorner cases like:\n\nword[!?',;.]\nword[!?',;.]word[!?',;.]word\n\n\nso don't consider the punctuation will only exist after one word, and followed by a whitespace\n\nuse re for regular expression match,\nreplace or punctuations, and split words\n\n\"\"\"\n\n\nclass Solution:\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n # def filterpunc(word):\n # word = word.lower()\n # for p in \"!?',;.\":\n # word = word.strip(p)\n # if word in banned:\n # return ''\n # return word\n # from collections import Counter\n # banned = set(banned)\n # words = paragraph.strip().split()\n # # words = list(filter(lambda x: not any(map(lambda y: y in x, list(\"!?',;.\"))), words))\n # words = list(filter(lambda x: x not in \"!?',;.\", words))\n # words = map(filterpunc, words)\n # words = filter(None, words)\n # return Counter(words).most_common(1)[0][0]\n \n import re\n from collections import Counter\n paragraph = re.sub('[\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word,_ in cnt.most_common():\n if word not in banned:\n return word\n\n\nif __name__ == \"__main__\":\n # paragraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\n # banned = [\"hit\"]\n # paragraph = \"Bob. hIt, baLl\"\n # banned = [\"bob\", \"hit\"]\n paragraph = \"a, a, a, a, b,b,b,c, c\"\n banned = ['a']\n sol = Solution()\n print(sol.mostCommonWord(paragraph, banned))\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
    Swap the minimum and maximum elements in an array of random integers.
"""
import random
SIZE = 10
MIN_ITEM = -100
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print('Array of random numbers:\n', array)
min_el = array[0]
max_el = array[0]
max_el_inx = 0
min_el_inx = 0
for el in range(SIZE):
if array[el] > max_el:
max_el = array[el]
max_el_inx = el
if array[el] < min_el:
min_el = array[el]
min_el_inx = el
print('Minimum and maximum elements of the array:\n', min_el, 'and', max_el)
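# pop/insert keeps the list length constant, so both saved indices remain valid during the swap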
array.pop(min_el_inx)
array.insert(min_el_inx, max_el)
array.pop(max_el_inx)
array.insert(max_el_inx, min_el)
print('Array with the minimum and maximum elements swapped:\n', array)
|
normal
|
{
"blob_id": "6027836b1b5d3cb8b842b1a1b77f5c9777269896",
"index": 7177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Массив случайных чисел:\\n', array)\n<mask token>\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-3": "<mask token>\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-4": "<mask token>\nimport random\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-5": "\"\"\"\n В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.\n\"\"\"\n\n\nimport random\n\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\n\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint('Массив, в котром поменяны местами минимальный и максимальный элементы:\\n', array)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given a string S, find the longest palindromic substring in S. You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if len(s) == 0:
return ""
if len(s) == 1:
return s
start = -1
end = -2
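        # first pass: expand around each index to find the longest odd-length palindrome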
for i in range(len(s)):
side = 1
while i - side >= 0 and i + side < len(s) and s[i - side] == s[i + side]:
side += 1
if (side - 1) * 2 + 1 > end - start + 1:
start = i - (side - 1)
end = i + side
out_string = s[start:end]
start = -1
end = -2
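        # second pass: expand around each gap (i, i+1) to find the longest even-length palindrome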
for i in range(len(s) - 1):
side = 0
while i - side >= 0 and i + 1 + side < len(s) and s[i - side] == s[i + 1 + side]:
side += 1
if side * 2 > end - start + 1:
start = i - side + 1
end = i + 1 + side
return out_string if len(out_string) > end - start else s[start:end]
|
normal
|
{
"blob_id": "7c39b3927bc0702818c54875785b4657c20c441e",
"index": 2272,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if len(s) == 0:\n return ''\n if len(s) == 1:\n return s\n start = -1\n end = -2\n for i in range(len(s)):\n side = 1\n while i - side >= 0 and i + side < len(s) and s[i - side] == s[\n i + side]:\n side += 1\n if (side - 1) * 2 + 1 > end - start + 1:\n start = i - (side - 1)\n end = i + side\n out_string = s[start:end]\n start = -1\n end = -2\n for i in range(len(s) - 1):\n side = 0\n while i - side >= 0 and i + 1 + side < len(s) and s[i - side] == s[\n i + 1 + side]:\n side += 1\n if side * 2 > end - start + 1:\n start = i - side + 1\n end = i + 1 + side\n return out_string if len(out_string) > end - start else s[start:end]\n",
"step-4": "# Given a string S, find the longest palindromic substring in S. You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.\n\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if len(s) == 0:\n return \"\"\n if len(s) == 1:\n return s\n \n start = -1\n end = -2\n for i in range(len(s)):\n side = 1\n while i - side >= 0 and i + side < len(s) and s[i - side] == s[i + side]:\n side += 1\n if (side - 1) * 2 + 1 > end - start + 1:\n start = i - (side - 1)\n end = i + side\n out_string = s[start:end]\n start = -1\n end = -2\n for i in range(len(s) - 1):\n side = 0\n while i - side >= 0 and i + 1 + side < len(s) and s[i - side] == s[i + 1 + side]:\n side += 1\n if side * 2 > end - start + 1:\n start = i - side + 1\n end = i + 1 + side\n return out_string if len(out_string) > end - start else s[start:end]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from .models import JobListing
from .models import Employer
admin.site.register(JobListing)
admin.site.register(Employer)
|
normal
|
{
"blob_id": "a96575d507a91472176c99d4d55e2a3bbf8111d1",
"index": 2707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(JobListing)\nadmin.site.register(Employer)\n",
"step-3": "from django.contrib import admin\nfrom .models import JobListing\nfrom .models import Employer\nadmin.site.register(JobListing)\nadmin.site.register(Employer)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
Created on 14 november 2015
@author: federico
'''
import paho.mqtt.client as mosquitto
import json
import urllib,urllib2
import datetime
import threading
import time
from pygame import mixer
from datetime import timedelta
#ALARM SOUND PATH
alarm_path="/home/pi/SmartBed/Smart_Bed/src/Rooster.wav"
#DWEET&FREEBOARD
thing_name='smart_bed_status'
url_freeboard="https://dweet.io:443/dweet/for/smart_bed_values"
url_status="https://dweet.io:443/get/latest/dweet/for/smart_bed_status"
url_freeboard_qos="https://dweet.io:443/dweet/for/smart_bed_qos"
url_freeboard_sleep_time="https://dweet.io:443/dweet/for/smart_bed_sleep_time"
#THINGSPEAK
url_thingspeak="https://api.thingspeak.com/update"
channel_id="68285"
api_read="XXXXXXXXXXXXXXX"
api_write="ZZZZZZZZZZZZZZZ"
#CONSTANT
soglia=10
broker_ip="127.0.0.1"
smart_alarm_threshold=10 #threshold for the smart alarm:how much movement is needed to ring
sensor_freq=2 #seconds
sensor_MAXVAL=255 #g
#LOCK VARIABLE: prevents two threads from triggering the alarm at the same time
alarm_clock_lock=0
#queue
q=[]
nsamples=10
#status of the system
status=0
mov_tot=0.1
alarm_sensibility=5 #seconds
def on_connect(client,userdata,rc):
print ("connected with result code"+str(rc))
client.subscribe("smart_bed/values", 0)
def on_message(client,userdata,msg):
print "Raspberry receive data Topic:",msg.topic+'\nMessage:'+str(msg.payload)
jsonfile=json.loads(msg.payload)
queue_insert(jsonfile)
def queue_insert(jsonfile):
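    # parse the SenML-style payload, threshold the combined movement value, queue it, and mirror the raw axes to freeboard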
x=int(jsonfile["e"]["v"]["x"])
y=int(jsonfile["e"]["v"]["y"])
z=int(jsonfile["e"]["v"]["z"])
valore=transform_function(x, y, z)
if(valore>soglia):
q.append(valore-soglia)
print "Value appended in the queue"+str(valore-soglia)
else:
q.append(0)
print "0 appended"
#SENDING DATA TO FREEBOARD LIVE VIEW
values={}
values["x"]=x
values["y"]=y
values["z"]=z
data = urllib.urlencode(values)
req = urllib2.Request(url_freeboard, data)
urllib2.urlopen(req)
def send_data_c(coda):
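    # average the non-zero movement samples in this batch and push the result to ThingSpeak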
global mov_tot
somma=0
conta=0
valore=0
for l in coda:
if l!=0:
somma=somma+l
conta=conta+1
if somma!=0:
valore=float(somma)/conta
mov_tot=mov_tot+valore
print "I'm ready to send"+ str(valore)+" to thingspeak"
#sending data to thingspeak movement
params = urllib.urlencode({'api_key': api_write, 'field1': "%.2f" % valore})
req=urllib2.Request(url_thingspeak,params)
urllib2.urlopen(req)
def transform_function(x,y,z):
#PARAMETERS TO SET IN CASE OF SPECIAL INTEREST IN ONE DIRECTION
a=1
b=1
c=1
valore=a*x+b*y+z*c
return valore
def process_data(ore,minu,init_time):
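    # drain the sample queue in batches of nsamples while the system is on, then compute the final sleep statistics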
global mov_tot
while(status==1):
if len(q)==nsamples:
coda=q[:]
tr=threading.Thread(target=send_data_c,args=(coda,))
tr.start()
del q[:]
#LAST DATA IN THE QUEUE
if len(q)!=0:
coda=q
tr=threading.Thread(target=send_data_c,args=(coda,))
tr.start()
del q[:]
#LAST STATISTICS
i=datetime.datetime.now()
#sleep time in minutes
b=i-init_time
sleep_time=b.seconds/60
print "Passed seconds from the start"+str(b.seconds)
print "Total movement"+str(mov_tot)
#MYFUNCTION TO QUALITY OF SLEEP
qos=-((100*sensor_freq*nsamples*15/(sensor_MAXVAL*3*b.seconds)))*mov_tot+100
#LAST DATA TO FREEBOARD
data = urllib.urlencode({'qos': "%.0f" %qos})
req = urllib2.Request(url_freeboard_qos, data)
urllib2.urlopen(req)
data = urllib.urlencode({'sleep_time':sleep_time})
req = urllib2.Request(url_freeboard_sleep_time, data)
urllib2.urlopen(req)
    #LAST DATA TO THINGSPEAK. A WHILE LOOP IS NEEDED BECAUSE A THINGSPEAK CHANNEL CAN ONLY BE UPDATED ONCE EVERY 15s
resp='0'
times=0
while resp=='0':
time.sleep(times)
params = urllib.urlencode({'api_key': api_write, 'field2': "%.1f" % sleep_time})
req=urllib2.Request(url_thingspeak,params)
risp=urllib2.urlopen(req)
resp=risp.read()
times=times+5
resp='0'
times=0
while(resp=='0'):
time.sleep(times)
params = urllib.urlencode({'api_key': api_write, 'field3': "%.1f" % qos})
req=urllib2.Request(url_thingspeak,params)
risp=urllib2.urlopen(req)
resp=risp.read()
times=times+5
#needed for next measurement
mov_tot=0.1
def alarmclock(h,m):
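    # plain alarm clock: poll the wall clock and ring once it matches the configured hour and minute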
global alarm_clock_lock
while(status==1):
i=datetime.datetime.now()
        if (i.hour==h) and (i.minute==m):
if alarm_clock_lock==0:
#LOCK
alarm_clock_lock=1
print "ALARM FROM BASIC ALARMCLOCK"
sound_clock()
#UNLOCK
alarm_clock_lock=0
#alarm sensibility
time.sleep(alarm_sensibility)
def sound_clock():
mixer.init()
mixer.music.load(alarm_path)
while(status==1):
mixer.music.play()
time.sleep(4)
def smart_alarm(a_h,a_m,ore,minu,smart_min):
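    # smart alarm: wait until the alarm window opens, then ring as soon as total movement rises past smart_alarm_threshold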
    #not ideal, but signal-based timers cannot be used from a child thread, so sleep for the computed interval instead
time_to_wait=abs(a_h-ore)*3600+abs(a_m-abs((minu-smart_min)%60))*60
print "second to sleep"+str(time_to_wait)
time.sleep(time_to_wait)
global mov_tot
initial_mov=mov_tot
while(status==1):
print "mov_tot"+ str(mov_tot)
print "initial_mov"+str(initial_mov)
if((mov_tot-initial_mov)>smart_alarm_threshold):
global alarm_clock_lock
#LOCK
if alarm_clock_lock==0:
alarm_clock_lock=1
print "ALARM FROM SMART CLOCK"
sound_clock()
#UNLOCK
alarm_clock_lock=0
time.sleep(5)
if __name__ == '__main__':
client=mosquitto.Mosquitto("Raspberry")
client.on_connect=on_connect
client.on_message = on_message
client.connect(broker_ip, port=1883, keepalive=60, bind_address="")
client.loop_start()
while(True):
req=urllib2.Request(url_status)
resp=urllib2.urlopen(req)
dweet=resp.read()
dweet2=json.loads(dweet)
stat=dweet2["with"][0]["content"]["status"]
        if (stat==1) and (status==0):
status=1
print "System is switched ON"
ore=dweet2["with"][0]["content"]["alarm_hour"]
minu=dweet2["with"][0]["content"]["alarm_min"]
smart_min=dweet2["with"][0]["content"]["smart_alarm"]
init_time=datetime.datetime.now()
actual_hour=init_time.hour
actual_min=init_time.minute
t=threading.Thread(target=process_data,args=(actual_hour,actual_min,init_time))
t.daemon=True
t.start()
l=threading.Thread(target=alarmclock,args=(ore,minu,))
l.daemon=True
l.start()
if(smart_min!=0):
h=threading.Thread(target=smart_alarm,args=(actual_hour,actual_min,ore,minu,smart_min,))
h.daemon=True
h.start()
diz={}
diz["status"]=1
val=client.publish("smart_bed",json.dumps(diz) , qos=1)
        elif (stat==0) and (status==1):
diz={}
diz["status"]=0
val=client.publish("smart_bed",json.dumps(diz) , qos=1)
status=0
print "System is switched OFF"
time.sleep(2)
client.loop_stop()
|
normal
|
{
"blob_id": "05bd95966d72dd40b9b828932b0bf70e40ddb573",
"index": 1511,
"step-1": "'''\nCreated on 14 november 2015\n\n@author: federico\n'''\n\nimport paho.mqtt.client as mosquitto\nimport json\nimport urllib,urllib2\nimport datetime\nimport threading\nimport time\nfrom pygame import mixer\nfrom datetime import timedelta\n\n\n#ALARM SOUND PATH\nalarm_path=\"/home/pi/SmartBed/Smart_Bed/src/Rooster.wav\"\n\n#DWEET&FREEBOARD \nthing_name='smart_bed_status'\nurl_freeboard=\"https://dweet.io:443/dweet/for/smart_bed_values\"\nurl_status=\"https://dweet.io:443/get/latest/dweet/for/smart_bed_status\"\nurl_freeboard_qos=\"https://dweet.io:443/dweet/for/smart_bed_qos\"\nurl_freeboard_sleep_time=\"https://dweet.io:443/dweet/for/smart_bed_sleep_time\"\n\n#THINGSPEAK\nurl_thingspeak=\"https://api.thingspeak.com/update\"\nchannel_id=\"68285\"\napi_read=\"XXXXXXXXXXXXXXX\"\napi_write=\"ZZZZZZZZZZZZZZZ\"\n\n#CONSTANT\nsoglia=10\nbroker_ip=\"127.0.0.1\"\nsmart_alarm_threshold=10 #threshold for the smart alarm:how much movement is needed to ring\nsensor_freq=2 #seconds\nsensor_MAXVAL=255 #g\n\n#LOCK VARIABLE:this variable is needed to avoid that 2 threads change the status variable\nalarm_clock_lock=0\n\n#queue\nq=[]\nnsamples=10\n\n#status of the system\nstatus=0\nmov_tot=0.1\nalarm_sensibility=5 #seconds\n\n\ndef on_connect(client,userdata,rc):\n print (\"connected with result code\"+str(rc))\n client.subscribe(\"smart_bed/values\", 0)\n \n \ndef on_message(client,userdata,msg):\n print \"Raspberry receive data Topic:\",msg.topic+'\\nMessage:'+str(msg.payload)\n jsonfile=json.loads(msg.payload)\n \n queue_insert(jsonfile)\n\ndef queue_insert(jsonfile):\n \n x=int(jsonfile[\"e\"][\"v\"][\"x\"])\n y=int(jsonfile[\"e\"][\"v\"][\"y\"])\n z=int(jsonfile[\"e\"][\"v\"][\"z\"])\n \n valore=transform_function(x, y, z) \n \n if(valore>soglia):\n q.append(valore-soglia)\n print \"Value appended in the queue\"+str(valore-soglia)\n else:\n q.append(0)\n print \"0 appended\"\n \n #SENDING DATA TO FREEBOARD LIVE VIEW\n values={}\n values[\"x\"]=x\n values[\"y\"]=y\n values[\"z\"]=z\n data = urllib.urlencode(values)\n req = urllib2.Request(url_freeboard, data)\n urllib2.urlopen(req)\n\n \ndef send_data_c(coda): \n global mov_tot\n somma=0\n conta=0\n valore=0\n for l in coda:\n if l!=0:\n somma=somma+l \n conta=conta+1\n \n if somma!=0: \n valore=float(somma)/conta\n mov_tot=mov_tot+valore\n print \"I'm ready to send\"+ str(valore)+\" to thingspeak\"\n \n #sending data to thingspeak movement\n params = urllib.urlencode({'api_key': api_write, 'field1': \"%.2f\" % valore})\n req=urllib2.Request(url_thingspeak,params)\n urllib2.urlopen(req)\n\n \ndef transform_function(x,y,z):\n\n #PARAMETERS TO SET IN CASE OF SPECIAL INTEREST IN ONE DIRECTION\n a=1\n b=1\n c=1\n valore=a*x+b*y+z*c\n return valore\n \ndef process_data(ore,minu,init_time):\n \n global mov_tot\n \n while(status==1):\n if len(q)==nsamples:\n coda=q[:]\n \n tr=threading.Thread(target=send_data_c,args=(coda,))\n tr.start()\n del q[:]\n\n #LAST DATA IN THE QUEUE\n \n if len(q)!=0:\n coda=q\n tr=threading.Thread(target=send_data_c,args=(coda,))\n tr.start()\n del q[:]\n \n #LAST STATISTICS\n\n i=datetime.datetime.now() \n \n #sleep time in minutes\n b=i-init_time\n sleep_time=b.seconds/60\n \n print \"Passed seconds from the start\"+str(b.seconds)\n print \"Total movement\"+str(mov_tot) \n \n #MYFUNCTION TO QUALITY OF SLEEP\n qos=-((100*sensor_freq*nsamples*15/(sensor_MAXVAL*3*b.seconds)))*mov_tot+100\n \n #LAST DATA TO FREEBOARD\n \n data = urllib.urlencode({'qos': \"%.0f\" %qos})\n req = urllib2.Request(url_freeboard_qos, data)\n 
urllib2.urlopen(req)\n\n data = urllib.urlencode({'sleep_time':sleep_time})\n req = urllib2.Request(url_freeboard_sleep_time, data)\n urllib2.urlopen(req)\n\n #LAST DATA TO THINGSPEAK. WHILE CYCLE IS NEEDED BECAUSE DATA ON THINGSPEAK CAN BE UPDATED EACH 15s\n resp='0'\n times=0\n while resp=='0':\n time.sleep(times)\n \tparams = urllib.urlencode({'api_key': api_write, 'field2': \"%.1f\" % sleep_time})\n \treq=urllib2.Request(url_thingspeak,params)\n \trisp=urllib2.urlopen(req)\n \tresp=risp.read()\n \ttimes=times+5\n\n resp='0'\n times=0\n while(resp=='0'):\n\ttime.sleep(times)\n\tparams = urllib.urlencode({'api_key': api_write, 'field3': \"%.1f\" % qos})\n \treq=urllib2.Request(url_thingspeak,params)\n \trisp=urllib2.urlopen(req)\n \tresp=risp.read()\n\ttimes=times+5\n\n \n \n #needed for next measurement\n mov_tot=0.1\n \ndef alarmclock(h,m):\n global alarm_clock_lock\n \n while(status==1):\n i=datetime.datetime.now() \n if (i.hour==h) & (i.minute==m):\n if alarm_clock_lock==0:\n #LOCK\n alarm_clock_lock=1\n print \"ALARM FROM BASIC ALARMCLOCK\"\n sound_clock()\n #UNLOCK\n alarm_clock_lock=0\n \n #alarm sensibility \n time.sleep(alarm_sensibility) \n \ndef sound_clock():\n \n mixer.init()\n mixer.music.load(alarm_path)\n while(status==1):\n mixer.music.play()\n time.sleep(4)\n \ndef smart_alarm(a_h,a_m,ore,minu,smart_min):\n #bad thing but signals cannot be managed as a child thread \n time_to_wait=abs(a_h-ore)*3600+abs(a_m-abs((minu-smart_min)%60))*60\n print \"second to sleep\"+str(time_to_wait)\n time.sleep(time_to_wait)\n \n global mov_tot\n initial_mov=mov_tot\n \n while(status==1):\n print \"mov_tot\"+ str(mov_tot)\n print \"initial_mov\"+str(initial_mov)\n if((mov_tot-initial_mov)>smart_alarm_threshold): \n global alarm_clock_lock\n #LOCK\n if alarm_clock_lock==0:\n alarm_clock_lock=1\n print \"ALARM FROM SMART CLOCK\"\n sound_clock()\n #UNLOCK\n alarm_clock_lock=0\n \n time.sleep(5)\n \n \n \nif __name__ == '__main__':\n \n client=mosquitto.Mosquitto(\"Raspberry\")\n client.on_connect=on_connect\n client.on_message = on_message\n \n client.connect(broker_ip, port=1883, keepalive=60, bind_address=\"\") \n client.loop_start() \n \n while(True):\n \n req=urllib2.Request(url_status)\n resp=urllib2.urlopen(req)\n dweet=resp.read()\n dweet2=json.loads(dweet)\n stat=dweet2[\"with\"][0][\"content\"][\"status\"]\n \n if (stat==1) & (status==0):\n \n status=1\n print \"System is switched ON\"\n ore=dweet2[\"with\"][0][\"content\"][\"alarm_hour\"]\n minu=dweet2[\"with\"][0][\"content\"][\"alarm_min\"]\n smart_min=dweet2[\"with\"][0][\"content\"][\"smart_alarm\"]\n \n \n\t init_time=datetime.datetime.now() \n actual_hour=init_time.hour\n actual_min=init_time.minute\n \n t=threading.Thread(target=process_data,args=(actual_hour,actual_min,init_time))\n t.daemon=True\n t.start()\n \n l=threading.Thread(target=alarmclock,args=(ore,minu,))\n l.daemon=True\n l.start()\n \n if(smart_min!=0):\n h=threading.Thread(target=smart_alarm,args=(actual_hour,actual_min,ore,minu,smart_min,))\n h.daemon=True\n h.start()\n \n diz={}\n diz[\"status\"]=1\n val=client.publish(\"smart_bed\",json.dumps(diz) , qos=1)\n \n elif (stat==0) & (status==1):\n \n \n diz={}\n diz[\"status\"]=0\n val=client.publish(\"smart_bed\",json.dumps(diz) , qos=1)\n status=0\n \n\n print \"System is switched OFF\"\n \n time.sleep(2)\n \n client.loop_stop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def play_43():
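    # read n integers and report whether they form a strictly increasing sequence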
n=int(input('Enter n :'))
l=[]
for i in range(n):
l.append(int(input()))
for i in range(n-1):
for j in range(i+1,n):
if l[i]<l[j]:
continue
return "no"
return "Yes"
print(play_43())
|
normal
|
{
"blob_id": "1605396a6edb31dd6fe9238a0506f8cfeb794d07",
"index": 5568,
"step-1": "<mask token>\n",
"step-2": "def play_43():\n n = int(input('Enter n :'))\n l = []\n for i in range(n):\n l.append(int(input()))\n for i in range(n - 1):\n for j in range(i + 1, n):\n if l[i] < l[j]:\n continue\n return 'no'\n return 'Yes'\n\n\n<mask token>\n",
"step-3": "def play_43():\n n = int(input('Enter n :'))\n l = []\n for i in range(n):\n l.append(int(input()))\n for i in range(n - 1):\n for j in range(i + 1, n):\n if l[i] < l[j]:\n continue\n return 'no'\n return 'Yes'\n\n\nplay_43()\n",
"step-4": "def play_43():\n\tn=int(input('Enter n :'))\n\tl=[]\n\tfor i in range(n):\n\t\tl.append(int(input()))\n\tfor i in range(n-1):\n\t\tfor j in range(i+1,n):\n\t\t\tif l[i]<l[j]:\n\t\t\t\tcontinue\n\t\t\treturn \"no\"\n\treturn \"Yes\"\nplay_43()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Jeremy Jao
# University of Pittsburgh: DBMI
# 6/18/2013
#
# This is the thing that returns the dictionary of the key. we can edit more code to return different values in the keys (gene) in each dictionary inside the dictionary.
# my sys.argv wasn't working in my IDE, and I'm not sure how it would; the gene name is hardcoded below, but switching to a command-line argument would be easy.
import cPickle
#import sys
#
#arg = sys.argv[1]  # note: sys.argv[0] is the script name; the gene would be the first real argument
print "Displaying dictionary for " + "MESTIT1"
hi = open("geneDictionary.pickle", "r")
hello = cPickle.load(hi)
print hello["MESTIT1"]
|
normal
|
{
"blob_id": "7016a7dda80c0cfae0e15cf239f6ae64eb9004b7",
"index": 9733,
"step-1": "# Jeremy Jao\r\n# University of Pittsburgh: DBMI\r\n# 6/18/2013\r\n#\r\n# This is the thing that returns the dictionary of the key. we can edit more code to return different values in the keys (gene) in each dictionary inside the dictionary.\r\n# my sys.argv isn't working in my situation due to my IDE (nor do I not know how it would work.... but yeah........... It's easy to code this.\r\n\r\nimport cPickle\r\n#import sys\r\n#\r\n#arg = sys.argv[0]\r\n\r\nprint \"Displaying dictionary for \" + \"MESTIT1\"\r\n\r\nhi = open(\"geneDictionary.pickle\", \"r\")\r\n\r\nhello = cPickle.load(hi)\r\n\r\nprint hello[\"MESTIT1\"]",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
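The record above is Python 2 (print statements, cPickle). A minimal Python 3 sketch of the same lookup that also takes the gene name from sys.argv as the comments intend; the file name and the default key come from the original, everything else is illustrative:

import pickle
import sys

# Fall back to the gene used above when no argument is given.
gene = sys.argv[1] if len(sys.argv) > 1 else "MESTIT1"

print("Displaying dictionary for " + gene)
with open("geneDictionary.pickle", "rb") as f:  # pickle needs binary mode in Python 3
    gene_dict = pickle.load(f)
print(gene_dict[gene])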
#Copyright 2020 Side Li, Arun Kumar
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from pyspark.sql import SparkSession
from pyspark.ml.feature import FeatureHasher, StringIndexer, VectorAssembler, StandardScaler
from pyspark.sql.types import *
from pyspark.ml import Pipeline
from pyspark.sql.functions import udf
import random
import pandas as pd
import os
import subprocess
import glob
import json
from hdfs import InsecureClient
import ray
from concurrent.futures import ThreadPoolExecutor
def line_count(filename, hdfs_used=False, client=None):
if hdfs_used:
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
@ray.remote(num_cpus=1, resources={"CustomResource":1})
def line_count_remote(filename, hdfs_used=False, client=None):
if hdfs_used:
if client is None:
client = InsecureClient('http://namenode:9870')
with client.read(filename, encoding='utf-8') as reader:
num_lines = sum(1 for _ in reader)
return num_lines
else:
return int(subprocess.check_output(['wc', '-l', filename]).split()[0])
def check_hdfs_path(path):
return path.startswith("hdfs")
class Metadata(object):
def __init__(self, num_groups, data_type, dimension):
self.num_groups = num_groups
self.data_type = data_type
self.dimension = dimension
self.groups = dict()
def add_group(self, name, path, val_frac):
print("group", name)
self.groups[name] = dict()
all_partitions = []
hdfs_used = check_hdfs_path(path)
client = None
if hdfs_used:
client = InsecureClient('http://namenode:9870')
path = path[len("hdfs://"):]
def fetch_info(filename):
if filename.endswith(".csv"):
file = path + filename
return {
"file_path": file,
"num_examples": ray.get(line_count_remote.remote(file, hdfs_used))
}
if filename.endswith(".png"):
file = path + filename
return {
"file_path": file,
"num_examples": 1
}
if hdfs_used:
files = client.list(path)
executor = ThreadPoolExecutor(max_workers=160)
all_partitions += [i for i in executor.map(fetch_info, files) if i is not None]
else:
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".csv"):
file = path + filename
all_partitions.append({
"file_path": file,
"num_examples": line_count(file)
})
if filename.endswith(".png"):
file = path + filename
all_partitions.append({
"file_path": file,
"num_examples": 1
})
num_files = len(all_partitions)
        # Too few files to hold out whole files; split the rows of the
        # combined CSV into train/validation instead.
        if val_frac * num_files < 1:
df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(path + "*.csv")], ignore_index=True)
num_examples = df.shape[0]
val_examples = int(num_examples * val_frac)
val = df[:(val_examples if val_examples != 0 else 1)]
train = df[val_examples:]
for f in glob.glob(path + "*"):
os.remove(f)
train.to_csv(path + "train.csv", index=False)
val.to_csv(path + "val.csv", index=False)
self.groups[name]["train_files"] = [{
"file_path": path + "train.csv",
"num_examples": train.shape[0]
}]
self.groups[name]["val_files"] = [{
"file_path": path + "val.csv",
"num_examples": val.shape[0]
}]
self.groups[name]["total_examples"] = train.shape[0]
else:
num_val_files = int(val_frac * num_files)
self.groups[name]["train_files"] = all_partitions[:num_files - num_val_files]
self.groups[name]["val_files"] = all_partitions[num_files - num_val_files:]
self.groups[name]["total_examples"] = sum([p["num_examples"] for p in self.groups[name]["train_files"]])
def to_json(self):
return {
"num_groups": self.num_groups,
"type": self.data_type,
"dimension": self.dimension,
"groups": self.groups
}
def criteo_etl(base_path, in_file, block_size, val_frac):
    # Cap rows per output file so each written block is roughly block_size MB
    # (assuming ~4 bytes per value and 4096*2 + 4 values per row).
    spark = SparkSession\
.builder\
.appName("PreprocessCriteoData") \
.config("spark.sql.files.maxRecordsPerFile", int(block_size/(4 * (4096*2 + 4) / 1024 / 1024)))\
.getOrCreate()
sc = spark.sparkContext
field = [StructField("Sale", IntegerType(), True),
StructField("SalesAmount", FloatType(), True),
StructField("ConversionDelay", FloatType(), True),
StructField("ClickTimestamp", StringType(), True),
StructField("NumClicksPerWeek", FloatType(), True),
StructField("ProductPrice", FloatType(), True),
StructField("ProductAgeGroup", StringType(), True),
StructField("DeviceType", StringType(), True),
StructField("AudienceId", StringType(), True),
StructField("ProductGender", StringType(), True),
StructField("ProductBrand", StringType(), True),
StructField("ProductCategory1", StringType(), True),
StructField("ProductCategory2", StringType(), True),
StructField("ProductCategory3", StringType(), True),
StructField("ProductCategory4", StringType(), True),
StructField("ProductCategory5", StringType(), True),
StructField("ProductCategory6", StringType(), True),
StructField("ProductCategory7", StringType(), True),
StructField("ProductCountry", StringType(), True),
StructField("ProductId", StringType(), True),
StructField("ProductTitle", StringType(), True),
StructField("PartnerId", StringType(), True),
StructField("UserId", StringType(), True)]
schema = StructType(field)
df = spark.read.format("csv").option("delimiter", "\t").schema(schema).load(base_path + in_file)
hasher = FeatureHasher(numFeatures=4096 * 2,
inputCols=["ProductAgeGroup", "DeviceType", "AudienceId",
"ProductGender", "ProductBrand", "ProductCategory1", "ProductCategory2",
"ProductCategory3", "ProductCategory4", "ProductCategory5",
"ProductCategory6", "ProductCategory7", "ProductId", "UserId"],
outputCol="CatFeatures")
product_indexer = StringIndexer(inputCol="ProductCountry", outputCol="ProductCountryIndexed")
partner_indexer = StringIndexer(inputCol="PartnerId", outputCol="PartnerIdIndexed")
vector_assembler = VectorAssembler(inputCols=["NumClicksPerWeek", "ProductPrice"], outputCol="NumFeatures")
scaler = StandardScaler(inputCol="NumFeatures", outputCol="ScaledNumFeatures")
final_assembler = VectorAssembler(inputCols=["ScaledNumFeatures", "CatFeatures"], outputCol="Features")
pipeline = Pipeline().setStages([product_indexer, partner_indexer, hasher,
vector_assembler, scaler, final_assembler])
transformedDf = pipeline.fit(df).transform(df).select("Sale", "ProductCountryIndexed", "PartnerIdIndexed",
"Features")
asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))
transformedDf = transformedDf.withColumn("Features", asDense("Features"))
def extract(row):
return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed,) + tuple(row.Features)
transformedDf = transformedDf.rdd.map(extract).toDF(["Sale", "ProductCountryIndexed", "PartnerIdIndexed"])
transformedDf.write.partitionBy("ProductCountryIndexed").csv(base_path + "country/")
|
normal
|
{
"blob_id": "203e678d565753bb51e1bbd90ffec0f3260b22fb",
"index": 119,
"step-1": "<mask token>\n\n\ndef line_count(filename, hdfs_used=False, client=None):\n if hdfs_used:\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\n<mask token>\n\n\ndef check_hdfs_path(path):\n return path.startswith('hdfs')\n\n\nclass Metadata(object):\n\n def __init__(self, num_groups, data_type, dimension):\n self.num_groups = num_groups\n self.data_type = data_type\n self.dimension = dimension\n self.groups = dict()\n\n def add_group(self, name, path, val_frac):\n print('group', name)\n self.groups[name] = dict()\n all_partitions = []\n hdfs_used = check_hdfs_path(path)\n client = None\n if hdfs_used:\n client = InsecureClient('http://namenode:9870')\n path = path[len('hdfs://'):]\n\n def fetch_info(filename):\n if filename.endswith('.csv'):\n file = path + filename\n return {'file_path': file, 'num_examples': ray.get(\n line_count_remote.remote(file, hdfs_used))}\n if filename.endswith('.png'):\n file = path + filename\n return {'file_path': file, 'num_examples': 1}\n if hdfs_used:\n files = client.list(path)\n executor = ThreadPoolExecutor(max_workers=160)\n all_partitions += [i for i in executor.map(fetch_info, files) if\n i is not None]\n else:\n for root, dirs, files in os.walk(path):\n for filename in files:\n if filename.endswith('.csv'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': line_count(file)})\n if filename.endswith('.png'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': 1})\n num_files = len(all_partitions)\n if val_frac * num_files < 1:\n df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(\n path + '*.csv')], ignore_index=True)\n num_examples = df.shape[0]\n val_examples = int(num_examples * val_frac)\n val = df[:val_examples if val_examples != 0 else 1]\n train = df[val_examples:]\n for f in glob.glob(path + '*'):\n os.remove(f)\n train.to_csv(path + 'train.csv', index=False)\n val.to_csv(path + 'val.csv', index=False)\n self.groups[name]['train_files'] = [{'file_path': path +\n 'train.csv', 'num_examples': train.shape[0]}]\n self.groups[name]['val_files'] = [{'file_path': path +\n 'val.csv', 'num_examples': val.shape[0]}]\n self.groups[name]['total_examples'] = train.shape[0]\n else:\n num_val_files = int(val_frac * num_files)\n self.groups[name]['train_files'] = all_partitions[:num_files -\n num_val_files]\n self.groups[name]['val_files'] = all_partitions[num_files -\n num_val_files:]\n self.groups[name]['total_examples'] = sum([p['num_examples'] for\n p in self.groups[name]['train_files']])\n\n def to_json(self):\n return {'num_groups': self.num_groups, 'type': self.data_type,\n 'dimension': self.dimension, 'groups': self.groups}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef line_count(filename, hdfs_used=False, client=None):\n if hdfs_used:\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\[email protected](num_cpus=1, resources={'CustomResource': 1})\ndef line_count_remote(filename, hdfs_used=False, client=None):\n if hdfs_used:\n if client is None:\n client = InsecureClient('http://namenode:9870')\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\ndef check_hdfs_path(path):\n return path.startswith('hdfs')\n\n\nclass Metadata(object):\n\n def __init__(self, num_groups, data_type, dimension):\n self.num_groups = num_groups\n self.data_type = data_type\n self.dimension = dimension\n self.groups = dict()\n\n def add_group(self, name, path, val_frac):\n print('group', name)\n self.groups[name] = dict()\n all_partitions = []\n hdfs_used = check_hdfs_path(path)\n client = None\n if hdfs_used:\n client = InsecureClient('http://namenode:9870')\n path = path[len('hdfs://'):]\n\n def fetch_info(filename):\n if filename.endswith('.csv'):\n file = path + filename\n return {'file_path': file, 'num_examples': ray.get(\n line_count_remote.remote(file, hdfs_used))}\n if filename.endswith('.png'):\n file = path + filename\n return {'file_path': file, 'num_examples': 1}\n if hdfs_used:\n files = client.list(path)\n executor = ThreadPoolExecutor(max_workers=160)\n all_partitions += [i for i in executor.map(fetch_info, files) if\n i is not None]\n else:\n for root, dirs, files in os.walk(path):\n for filename in files:\n if filename.endswith('.csv'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': line_count(file)})\n if filename.endswith('.png'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': 1})\n num_files = len(all_partitions)\n if val_frac * num_files < 1:\n df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(\n path + '*.csv')], ignore_index=True)\n num_examples = df.shape[0]\n val_examples = int(num_examples * val_frac)\n val = df[:val_examples if val_examples != 0 else 1]\n train = df[val_examples:]\n for f in glob.glob(path + '*'):\n os.remove(f)\n train.to_csv(path + 'train.csv', index=False)\n val.to_csv(path + 'val.csv', index=False)\n self.groups[name]['train_files'] = [{'file_path': path +\n 'train.csv', 'num_examples': train.shape[0]}]\n self.groups[name]['val_files'] = [{'file_path': path +\n 'val.csv', 'num_examples': val.shape[0]}]\n self.groups[name]['total_examples'] = train.shape[0]\n else:\n num_val_files = int(val_frac * num_files)\n self.groups[name]['train_files'] = all_partitions[:num_files -\n num_val_files]\n self.groups[name]['val_files'] = all_partitions[num_files -\n num_val_files:]\n self.groups[name]['total_examples'] = sum([p['num_examples'] for\n p in self.groups[name]['train_files']])\n\n def to_json(self):\n return {'num_groups': self.num_groups, 'type': self.data_type,\n 'dimension': self.dimension, 'groups': self.groups}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef line_count(filename, hdfs_used=False, client=None):\n if hdfs_used:\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\[email protected](num_cpus=1, resources={'CustomResource': 1})\ndef line_count_remote(filename, hdfs_used=False, client=None):\n if hdfs_used:\n if client is None:\n client = InsecureClient('http://namenode:9870')\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\ndef check_hdfs_path(path):\n return path.startswith('hdfs')\n\n\nclass Metadata(object):\n\n def __init__(self, num_groups, data_type, dimension):\n self.num_groups = num_groups\n self.data_type = data_type\n self.dimension = dimension\n self.groups = dict()\n\n def add_group(self, name, path, val_frac):\n print('group', name)\n self.groups[name] = dict()\n all_partitions = []\n hdfs_used = check_hdfs_path(path)\n client = None\n if hdfs_used:\n client = InsecureClient('http://namenode:9870')\n path = path[len('hdfs://'):]\n\n def fetch_info(filename):\n if filename.endswith('.csv'):\n file = path + filename\n return {'file_path': file, 'num_examples': ray.get(\n line_count_remote.remote(file, hdfs_used))}\n if filename.endswith('.png'):\n file = path + filename\n return {'file_path': file, 'num_examples': 1}\n if hdfs_used:\n files = client.list(path)\n executor = ThreadPoolExecutor(max_workers=160)\n all_partitions += [i for i in executor.map(fetch_info, files) if\n i is not None]\n else:\n for root, dirs, files in os.walk(path):\n for filename in files:\n if filename.endswith('.csv'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': line_count(file)})\n if filename.endswith('.png'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': 1})\n num_files = len(all_partitions)\n if val_frac * num_files < 1:\n df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(\n path + '*.csv')], ignore_index=True)\n num_examples = df.shape[0]\n val_examples = int(num_examples * val_frac)\n val = df[:val_examples if val_examples != 0 else 1]\n train = df[val_examples:]\n for f in glob.glob(path + '*'):\n os.remove(f)\n train.to_csv(path + 'train.csv', index=False)\n val.to_csv(path + 'val.csv', index=False)\n self.groups[name]['train_files'] = [{'file_path': path +\n 'train.csv', 'num_examples': train.shape[0]}]\n self.groups[name]['val_files'] = [{'file_path': path +\n 'val.csv', 'num_examples': val.shape[0]}]\n self.groups[name]['total_examples'] = train.shape[0]\n else:\n num_val_files = int(val_frac * num_files)\n self.groups[name]['train_files'] = all_partitions[:num_files -\n num_val_files]\n self.groups[name]['val_files'] = all_partitions[num_files -\n num_val_files:]\n self.groups[name]['total_examples'] = sum([p['num_examples'] for\n p in self.groups[name]['train_files']])\n\n def to_json(self):\n return {'num_groups': self.num_groups, 'type': self.data_type,\n 'dimension': self.dimension, 'groups': self.groups}\n\n\ndef criteo_etl(base_path, in_file, block_size, val_frac):\n spark = SparkSession.builder.appName('PreprocessCriteoData').config(\n 'spark.sql.files.maxRecordsPerFile', int(block_size / (4 * (4096 * \n 2 + 4) / 1024 / 1024))).getOrCreate()\n sc = spark.sparkContext\n field = [StructField('Sale', 
IntegerType(), True), StructField(\n 'SalesAmount', FloatType(), True), StructField('ConversionDelay',\n FloatType(), True), StructField('ClickTimestamp', StringType(), \n True), StructField('NumClicksPerWeek', FloatType(), True),\n StructField('ProductPrice', FloatType(), True), StructField(\n 'ProductAgeGroup', StringType(), True), StructField('DeviceType',\n StringType(), True), StructField('AudienceId', StringType(), True),\n StructField('ProductGender', StringType(), True), StructField(\n 'ProductBrand', StringType(), True), StructField('ProductCategory1',\n StringType(), True), StructField('ProductCategory2', StringType(), \n True), StructField('ProductCategory3', StringType(), True),\n StructField('ProductCategory4', StringType(), True), StructField(\n 'ProductCategory5', StringType(), True), StructField(\n 'ProductCategory6', StringType(), True), StructField(\n 'ProductCategory7', StringType(), True), StructField(\n 'ProductCountry', StringType(), True), StructField('ProductId',\n StringType(), True), StructField('ProductTitle', StringType(), True\n ), StructField('PartnerId', StringType(), True), StructField(\n 'UserId', StringType(), True)]\n schema = StructType(field)\n df = spark.read.format('csv').option('delimiter', '\\t').schema(schema\n ).load(base_path + in_file)\n hasher = FeatureHasher(numFeatures=4096 * 2, inputCols=[\n 'ProductAgeGroup', 'DeviceType', 'AudienceId', 'ProductGender',\n 'ProductBrand', 'ProductCategory1', 'ProductCategory2',\n 'ProductCategory3', 'ProductCategory4', 'ProductCategory5',\n 'ProductCategory6', 'ProductCategory7', 'ProductId', 'UserId'],\n outputCol='CatFeatures')\n product_indexer = StringIndexer(inputCol='ProductCountry', outputCol=\n 'ProductCountryIndexed')\n partner_indexer = StringIndexer(inputCol='PartnerId', outputCol=\n 'PartnerIdIndexed')\n vector_assembler = VectorAssembler(inputCols=['NumClicksPerWeek',\n 'ProductPrice'], outputCol='NumFeatures')\n scaler = StandardScaler(inputCol='NumFeatures', outputCol=\n 'ScaledNumFeatures')\n final_assembler = VectorAssembler(inputCols=['ScaledNumFeatures',\n 'CatFeatures'], outputCol='Features')\n pipeline = Pipeline().setStages([product_indexer, partner_indexer,\n hasher, vector_assembler, scaler, final_assembler])\n transformedDf = pipeline.fit(df).transform(df).select('Sale',\n 'ProductCountryIndexed', 'PartnerIdIndexed', 'Features')\n asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))\n transformedDf = transformedDf.withColumn('Features', asDense('Features'))\n\n def extract(row):\n return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed\n ) + tuple(row.Features)\n transformedDf = transformedDf.rdd.map(extract).toDF(['Sale',\n 'ProductCountryIndexed', 'PartnerIdIndexed'])\n transformedDf.write.partitionBy('ProductCountryIndexed').csv(base_path +\n 'country/')\n",
"step-4": "from pyspark.sql import SparkSession\nfrom pyspark.ml.feature import FeatureHasher, StringIndexer, VectorAssembler, StandardScaler\nfrom pyspark.sql.types import *\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.functions import udf\nimport random\nimport pandas as pd\nimport os\nimport subprocess\nimport glob\nimport json\nfrom hdfs import InsecureClient\nimport ray\nfrom concurrent.futures import ThreadPoolExecutor\n\n\ndef line_count(filename, hdfs_used=False, client=None):\n if hdfs_used:\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\[email protected](num_cpus=1, resources={'CustomResource': 1})\ndef line_count_remote(filename, hdfs_used=False, client=None):\n if hdfs_used:\n if client is None:\n client = InsecureClient('http://namenode:9870')\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\ndef check_hdfs_path(path):\n return path.startswith('hdfs')\n\n\nclass Metadata(object):\n\n def __init__(self, num_groups, data_type, dimension):\n self.num_groups = num_groups\n self.data_type = data_type\n self.dimension = dimension\n self.groups = dict()\n\n def add_group(self, name, path, val_frac):\n print('group', name)\n self.groups[name] = dict()\n all_partitions = []\n hdfs_used = check_hdfs_path(path)\n client = None\n if hdfs_used:\n client = InsecureClient('http://namenode:9870')\n path = path[len('hdfs://'):]\n\n def fetch_info(filename):\n if filename.endswith('.csv'):\n file = path + filename\n return {'file_path': file, 'num_examples': ray.get(\n line_count_remote.remote(file, hdfs_used))}\n if filename.endswith('.png'):\n file = path + filename\n return {'file_path': file, 'num_examples': 1}\n if hdfs_used:\n files = client.list(path)\n executor = ThreadPoolExecutor(max_workers=160)\n all_partitions += [i for i in executor.map(fetch_info, files) if\n i is not None]\n else:\n for root, dirs, files in os.walk(path):\n for filename in files:\n if filename.endswith('.csv'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': line_count(file)})\n if filename.endswith('.png'):\n file = path + filename\n all_partitions.append({'file_path': file,\n 'num_examples': 1})\n num_files = len(all_partitions)\n if val_frac * num_files < 1:\n df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(\n path + '*.csv')], ignore_index=True)\n num_examples = df.shape[0]\n val_examples = int(num_examples * val_frac)\n val = df[:val_examples if val_examples != 0 else 1]\n train = df[val_examples:]\n for f in glob.glob(path + '*'):\n os.remove(f)\n train.to_csv(path + 'train.csv', index=False)\n val.to_csv(path + 'val.csv', index=False)\n self.groups[name]['train_files'] = [{'file_path': path +\n 'train.csv', 'num_examples': train.shape[0]}]\n self.groups[name]['val_files'] = [{'file_path': path +\n 'val.csv', 'num_examples': val.shape[0]}]\n self.groups[name]['total_examples'] = train.shape[0]\n else:\n num_val_files = int(val_frac * num_files)\n self.groups[name]['train_files'] = all_partitions[:num_files -\n num_val_files]\n self.groups[name]['val_files'] = all_partitions[num_files -\n num_val_files:]\n self.groups[name]['total_examples'] = sum([p['num_examples'] for\n p in self.groups[name]['train_files']])\n\n def to_json(self):\n return 
{'num_groups': self.num_groups, 'type': self.data_type,\n 'dimension': self.dimension, 'groups': self.groups}\n\n\ndef criteo_etl(base_path, in_file, block_size, val_frac):\n spark = SparkSession.builder.appName('PreprocessCriteoData').config(\n 'spark.sql.files.maxRecordsPerFile', int(block_size / (4 * (4096 * \n 2 + 4) / 1024 / 1024))).getOrCreate()\n sc = spark.sparkContext\n field = [StructField('Sale', IntegerType(), True), StructField(\n 'SalesAmount', FloatType(), True), StructField('ConversionDelay',\n FloatType(), True), StructField('ClickTimestamp', StringType(), \n True), StructField('NumClicksPerWeek', FloatType(), True),\n StructField('ProductPrice', FloatType(), True), StructField(\n 'ProductAgeGroup', StringType(), True), StructField('DeviceType',\n StringType(), True), StructField('AudienceId', StringType(), True),\n StructField('ProductGender', StringType(), True), StructField(\n 'ProductBrand', StringType(), True), StructField('ProductCategory1',\n StringType(), True), StructField('ProductCategory2', StringType(), \n True), StructField('ProductCategory3', StringType(), True),\n StructField('ProductCategory4', StringType(), True), StructField(\n 'ProductCategory5', StringType(), True), StructField(\n 'ProductCategory6', StringType(), True), StructField(\n 'ProductCategory7', StringType(), True), StructField(\n 'ProductCountry', StringType(), True), StructField('ProductId',\n StringType(), True), StructField('ProductTitle', StringType(), True\n ), StructField('PartnerId', StringType(), True), StructField(\n 'UserId', StringType(), True)]\n schema = StructType(field)\n df = spark.read.format('csv').option('delimiter', '\\t').schema(schema\n ).load(base_path + in_file)\n hasher = FeatureHasher(numFeatures=4096 * 2, inputCols=[\n 'ProductAgeGroup', 'DeviceType', 'AudienceId', 'ProductGender',\n 'ProductBrand', 'ProductCategory1', 'ProductCategory2',\n 'ProductCategory3', 'ProductCategory4', 'ProductCategory5',\n 'ProductCategory6', 'ProductCategory7', 'ProductId', 'UserId'],\n outputCol='CatFeatures')\n product_indexer = StringIndexer(inputCol='ProductCountry', outputCol=\n 'ProductCountryIndexed')\n partner_indexer = StringIndexer(inputCol='PartnerId', outputCol=\n 'PartnerIdIndexed')\n vector_assembler = VectorAssembler(inputCols=['NumClicksPerWeek',\n 'ProductPrice'], outputCol='NumFeatures')\n scaler = StandardScaler(inputCol='NumFeatures', outputCol=\n 'ScaledNumFeatures')\n final_assembler = VectorAssembler(inputCols=['ScaledNumFeatures',\n 'CatFeatures'], outputCol='Features')\n pipeline = Pipeline().setStages([product_indexer, partner_indexer,\n hasher, vector_assembler, scaler, final_assembler])\n transformedDf = pipeline.fit(df).transform(df).select('Sale',\n 'ProductCountryIndexed', 'PartnerIdIndexed', 'Features')\n asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))\n transformedDf = transformedDf.withColumn('Features', asDense('Features'))\n\n def extract(row):\n return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed\n ) + tuple(row.Features)\n transformedDf = transformedDf.rdd.map(extract).toDF(['Sale',\n 'ProductCountryIndexed', 'PartnerIdIndexed'])\n transformedDf.write.partitionBy('ProductCountryIndexed').csv(base_path +\n 'country/')\n",
"step-5": "#Copyright 2020 Side Li, Arun Kumar\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.feature import FeatureHasher, StringIndexer, VectorAssembler, StandardScaler\nfrom pyspark.sql.types import *\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.functions import udf\nimport random\nimport pandas as pd\nimport os\nimport subprocess\nimport glob\nimport json\nfrom hdfs import InsecureClient\nimport ray\nfrom concurrent.futures import ThreadPoolExecutor\n\n\ndef line_count(filename, hdfs_used=False, client=None):\n if hdfs_used:\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\[email protected](num_cpus=1, resources={\"CustomResource\":1})\ndef line_count_remote(filename, hdfs_used=False, client=None):\n if hdfs_used:\n if client is None:\n client = InsecureClient('http://namenode:9870')\n with client.read(filename, encoding='utf-8') as reader:\n num_lines = sum(1 for _ in reader)\n return num_lines\n else:\n return int(subprocess.check_output(['wc', '-l', filename]).split()[0])\n\n\ndef check_hdfs_path(path):\n return path.startswith(\"hdfs\")\n\n\nclass Metadata(object):\n def __init__(self, num_groups, data_type, dimension):\n self.num_groups = num_groups\n self.data_type = data_type\n self.dimension = dimension\n self.groups = dict()\n\n def add_group(self, name, path, val_frac):\n print(\"group\", name)\n self.groups[name] = dict()\n all_partitions = []\n hdfs_used = check_hdfs_path(path)\n client = None\n if hdfs_used:\n client = InsecureClient('http://namenode:9870')\n path = path[len(\"hdfs://\"):]\n\n def fetch_info(filename):\n if filename.endswith(\".csv\"):\n file = path + filename\n return {\n \"file_path\": file,\n \"num_examples\": ray.get(line_count_remote.remote(file, hdfs_used))\n }\n if filename.endswith(\".png\"):\n file = path + filename\n return {\n \"file_path\": file,\n \"num_examples\": 1\n }\n if hdfs_used:\n files = client.list(path)\n executor = ThreadPoolExecutor(max_workers=160)\n all_partitions += [i for i in executor.map(fetch_info, files) if i is not None]\n else:\n for root, dirs, files in os.walk(path):\n for filename in files:\n if filename.endswith(\".csv\"):\n file = path + filename\n all_partitions.append({\n \"file_path\": file,\n \"num_examples\": line_count(file)\n })\n if filename.endswith(\".png\"):\n file = path + filename\n all_partitions.append({\n \"file_path\": file,\n \"num_examples\": 1\n })\n\n num_files = len(all_partitions)\n if val_frac * num_files < 1:\n df = pd.concat([pd.read_csv(f, header=None) for f in glob.glob(path + \"*.csv\")], ignore_index=True)\n num_examples = df.shape[0]\n val_examples = int(num_examples * val_frac)\n val = df[:(val_examples if val_examples != 0 else 1)]\n train = df[val_examples:]\n for f in glob.glob(path + \"*\"):\n os.remove(f)\n train.to_csv(path + \"train.csv\", index=False)\n val.to_csv(path + 
\"val.csv\", index=False)\n self.groups[name][\"train_files\"] = [{\n \"file_path\": path + \"train.csv\",\n \"num_examples\": train.shape[0]\n }]\n self.groups[name][\"val_files\"] = [{\n \"file_path\": path + \"val.csv\",\n \"num_examples\": val.shape[0]\n }]\n self.groups[name][\"total_examples\"] = train.shape[0]\n else:\n num_val_files = int(val_frac * num_files)\n self.groups[name][\"train_files\"] = all_partitions[:num_files - num_val_files]\n self.groups[name][\"val_files\"] = all_partitions[num_files - num_val_files:]\n self.groups[name][\"total_examples\"] = sum([p[\"num_examples\"] for p in self.groups[name][\"train_files\"]])\n\n def to_json(self):\n return {\n \"num_groups\": self.num_groups,\n \"type\": self.data_type,\n \"dimension\": self.dimension,\n \"groups\": self.groups\n }\n\n\ndef criteo_etl(base_path, in_file, block_size, val_frac):\n spark = SparkSession\\\n .builder\\\n .appName(\"PreprocessCriteoData\") \\\n .config(\"spark.sql.files.maxRecordsPerFile\", int(block_size/(4 * (4096*2 + 4) / 1024 / 1024)))\\\n .getOrCreate()\n sc = spark.sparkContext\n\n field = [StructField(\"Sale\", IntegerType(), True),\n StructField(\"SalesAmount\", FloatType(), True),\n StructField(\"ConversionDelay\", FloatType(), True),\n StructField(\"ClickTimestamp\", StringType(), True),\n StructField(\"NumClicksPerWeek\", FloatType(), True),\n StructField(\"ProductPrice\", FloatType(), True),\n StructField(\"ProductAgeGroup\", StringType(), True),\n StructField(\"DeviceType\", StringType(), True),\n StructField(\"AudienceId\", StringType(), True),\n StructField(\"ProductGender\", StringType(), True),\n StructField(\"ProductBrand\", StringType(), True),\n StructField(\"ProductCategory1\", StringType(), True),\n StructField(\"ProductCategory2\", StringType(), True),\n StructField(\"ProductCategory3\", StringType(), True),\n StructField(\"ProductCategory4\", StringType(), True),\n StructField(\"ProductCategory5\", StringType(), True),\n StructField(\"ProductCategory6\", StringType(), True),\n StructField(\"ProductCategory7\", StringType(), True),\n StructField(\"ProductCountry\", StringType(), True),\n StructField(\"ProductId\", StringType(), True),\n StructField(\"ProductTitle\", StringType(), True),\n StructField(\"PartnerId\", StringType(), True),\n StructField(\"UserId\", StringType(), True)]\n schema = StructType(field)\n\n df = spark.read.format(\"csv\").option(\"delimiter\", \"\\t\").schema(schema).load(base_path + in_file)\n hasher = FeatureHasher(numFeatures=4096 * 2,\n inputCols=[\"ProductAgeGroup\", \"DeviceType\", \"AudienceId\",\n \"ProductGender\", \"ProductBrand\", \"ProductCategory1\", \"ProductCategory2\",\n \"ProductCategory3\", \"ProductCategory4\", \"ProductCategory5\",\n \"ProductCategory6\", \"ProductCategory7\", \"ProductId\", \"UserId\"],\n outputCol=\"CatFeatures\")\n product_indexer = StringIndexer(inputCol=\"ProductCountry\", outputCol=\"ProductCountryIndexed\")\n partner_indexer = StringIndexer(inputCol=\"PartnerId\", outputCol=\"PartnerIdIndexed\")\n vector_assembler = VectorAssembler(inputCols=[\"NumClicksPerWeek\", \"ProductPrice\"], outputCol=\"NumFeatures\")\n scaler = StandardScaler(inputCol=\"NumFeatures\", outputCol=\"ScaledNumFeatures\")\n final_assembler = VectorAssembler(inputCols=[\"ScaledNumFeatures\", \"CatFeatures\"], outputCol=\"Features\")\n\n pipeline = Pipeline().setStages([product_indexer, partner_indexer, hasher,\n vector_assembler, scaler, final_assembler])\n transformedDf = pipeline.fit(df).transform(df).select(\"Sale\", 
\"ProductCountryIndexed\", \"PartnerIdIndexed\",\n \"Features\")\n\n asDense = udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))\n transformedDf = transformedDf.withColumn(\"Features\", asDense(\"Features\"))\n\n def extract(row):\n return (row.Sale, row.ProductCountryIndexed, row.PartnerIdIndexed,) + tuple(row.Features)\n\n transformedDf = transformedDf.rdd.map(extract).toDF([\"Sale\", \"ProductCountryIndexed\", \"PartnerIdIndexed\"])\n\n transformedDf.write.partitionBy(\"ProductCountryIndexed\").csv(base_path + \"country/\")\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
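A minimal driver sketch wiring criteo_etl and Metadata from the module above together; every path, count, and the dimension value are assumptions for illustration, only the functions and classes come from the record:

import json
import ray

# Only needed for hdfs:// paths: line counting then runs as Ray tasks that
# each ask for one unit of the custom resource declared on the decorator.
ray.init(resources={"CustomResource": 4})

# Split the raw Criteo TSV by country into roughly 256 MB CSV blocks.
criteo_etl("/data/criteo/", "train.tsv", block_size=256, val_frac=0.1)

# Register one per-country group, holding out 10% of its files for validation.
# dimension is illustrative: 2 scaled numerics + 4096*2 hashed categoricals.
meta = Metadata(num_groups=1, data_type="tabular", dimension=4096 * 2 + 2)
meta.add_group("0.0", "/data/criteo/country/ProductCountryIndexed=0.0/", 0.1)

with open("metadata.json", "w") as f:
    json.dump(meta.to_json(), f)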
# Generated by Django 3.1.3 on 2020-11-18 13:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bookstore', '0003_auto_20201118_1325'),
]
operations = [
migrations.AddField(
model_name='book',
name='seller',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "fa09937ce64952795ae27cb91bf2c52dfb3ef4da",
"index": 4532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]\n operations = [migrations.AddField(model_name='book', name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.\n deletion.CASCADE, to='auth.user'), preserve_default=False)]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]\n operations = [migrations.AddField(model_name='book', name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.\n deletion.CASCADE, to='auth.user'), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.1.3 on 2020-11-18 13:26\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('bookstore', '0003_auto_20201118_1325'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='book',\n name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
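For context, a model definition that would produce the AddField above could look like this; the Book class body is a hypothetical reconstruction, and only the seller field is implied by the migration:

from django.conf import settings
from django.db import models


class Book(models.Model):
    # Field implied by the AddField operation above; default=1 in the
    # migration only backfills rows that existed before the column.
    seller = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.CASCADE)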
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    #return HttpResponse("Hi This is SAU5081 page.")
return render(request, "sau5081/sau5081.html")
|
normal
|
{
"blob_id": "ac1ac80739bed0cebf7a89a7d55e1b4fa6c68cdf",
"index": 3428,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'sau5081/sau5081.html')\n",
"step-3": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n\ndef index(request):\n return render(request, 'sau5081/sau5081.html')\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n\n# Create your views here.\ndef index(request):\n #return HttpRequest(\"Hi This is SAU5081 page.\")\n return render(request, \"sau5081/sau5081.html\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
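The index view above only renders once it is routed; a minimal urls.py sketch for the same app (file placement and route name are assumptions):

from django.urls import path

from . import views

urlpatterns = [
    # Route the app root to the index view defined above.
    path('', views.index, name='index'),
]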
{"filter":false,"title":"cash.py","tooltip":"/pset6/cash/cash.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":12,"column":7},"end":{"row":12,"column":8},"action":"insert","lines":[" "],"id":308},{"start":{"row":12,"column":8},"end":{"row":12,"column":9},"action":"insert","lines":[">"]}],[{"start":{"row":12,"column":9},"end":{"row":12,"column":10},"action":"insert","lines":[" "],"id":309}],[{"start":{"row":12,"column":9},"end":{"row":12,"column":10},"action":"remove","lines":[" "],"id":310}],[{"start":{"row":12,"column":9},"end":{"row":12,"column":10},"action":"insert","lines":["="],"id":311}],[{"start":{"row":12,"column":10},"end":{"row":12,"column":11},"action":"insert","lines":[" "],"id":312},{"start":{"row":12,"column":11},"end":{"row":12,"column":12},"action":"insert","lines":["q"]},{"start":{"row":12,"column":12},"end":{"row":12,"column":13},"action":"insert","lines":["u"]},{"start":{"row":12,"column":13},"end":{"row":12,"column":14},"action":"insert","lines":["a"]},{"start":{"row":12,"column":14},"end":{"row":12,"column":15},"action":"insert","lines":["r"]},{"start":{"row":12,"column":15},"end":{"row":12,"column":16},"action":"insert","lines":["t"]},{"start":{"row":12,"column":16},"end":{"row":12,"column":17},"action":"insert","lines":["e"]},{"start":{"row":12,"column":17},"end":{"row":12,"column":18},"action":"insert","lines":["r"]}],[{"start":{"row":14,"column":0},"end":{"row":15,"column":13},"action":"remove","lines":[" if c < quarter:"," break"],"id":313},{"start":{"row":13,"column":19},"end":{"row":14,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":16,"column":6},"end":{"row":16,"column":10},"action":"remove","lines":["True"],"id":314},{"start":{"row":16,"column":6},"end":{"row":16,"column":7},"action":"insert","lines":["c"]},{"start":{"row":16,"column":7},"end":{"row":16,"column":8},"action":"insert","lines":[">"]}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["="],"id":315}],[{"start":{"row":16,"column":9},"end":{"row":16,"column":10},"action":"insert","lines":[" "],"id":316},{"start":{"row":16,"column":10},"end":{"row":16,"column":11},"action":"insert","lines":["d"]},{"start":{"row":16,"column":11},"end":{"row":16,"column":12},"action":"insert","lines":["i"]},{"start":{"row":16,"column":12},"end":{"row":16,"column":13},"action":"insert","lines":["m"]},{"start":{"row":16,"column":13},"end":{"row":16,"column":14},"action":"insert","lines":["e"]}],[{"start":{"row":16,"column":7},"end":{"row":16,"column":8},"action":"insert","lines":[" "],"id":317}],[{"start":{"row":18,"column":0},"end":{"row":19,"column":13},"action":"remove","lines":[" if c < dime:"," break"],"id":318},{"start":{"row":17,"column":16},"end":{"row":18,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":20,"column":6},"end":{"row":20,"column":10},"action":"remove","lines":["True"],"id":319}],[{"start":{"row":20,"column":6},"end":{"row":20,"column":7},"action":"insert","lines":["c"],"id":320}],[{"start":{"row":20,"column":7},"end":{"row":20,"column":8},"action":"insert","lines":[">"],"id":321}],[{"start":{"row":20,"column":7},"end":{"row":20,"column":8},"action":"remove","lines":[">"],"id":322}],[{"start":{"row":20,"column":7},"end":{"row":20,"column":8},"action":"insert","lines":[" 
"],"id":323},{"start":{"row":20,"column":8},"end":{"row":20,"column":9},"action":"insert","lines":[">"]}],[{"start":{"row":20,"column":9},"end":{"row":20,"column":10},"action":"insert","lines":["="],"id":324}],[{"start":{"row":20,"column":10},"end":{"row":20,"column":11},"action":"insert","lines":[" "],"id":325},{"start":{"row":20,"column":11},"end":{"row":20,"column":12},"action":"insert","lines":["n"]},{"start":{"row":20,"column":12},"end":{"row":20,"column":13},"action":"insert","lines":["i"]},{"start":{"row":20,"column":13},"end":{"row":20,"column":14},"action":"insert","lines":["c"]},{"start":{"row":20,"column":14},"end":{"row":20,"column":15},"action":"insert","lines":["k"]},{"start":{"row":20,"column":15},"end":{"row":20,"column":16},"action":"insert","lines":["e"]},{"start":{"row":20,"column":16},"end":{"row":20,"column":17},"action":"insert","lines":["l"]}],[{"start":{"row":22,"column":1},"end":{"row":23,"column":13},"action":"remove","lines":[" if c < nickel:"," break"],"id":326},{"start":{"row":22,"column":0},"end":{"row":22,"column":1},"action":"remove","lines":[" "]},{"start":{"row":21,"column":18},"end":{"row":22,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":24,"column":9},"end":{"row":24,"column":10},"action":"remove","lines":["e"],"id":327},{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"remove","lines":["u"]},{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"remove","lines":["r"]},{"start":{"row":24,"column":6},"end":{"row":24,"column":7},"action":"remove","lines":["T"]}],[{"start":{"row":24,"column":6},"end":{"row":24,"column":7},"action":"insert","lines":["c"],"id":328}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"insert","lines":[" "],"id":329},{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"insert","lines":[">"]},{"start":{"row":24,"column":9},"end":{"row":24,"column":10},"action":"insert","lines":["="]}],[{"start":{"row":24,"column":10},"end":{"row":24,"column":11},"action":"insert","lines":[" "],"id":330},{"start":{"row":24,"column":11},"end":{"row":24,"column":12},"action":"insert","lines":["p"]},{"start":{"row":24,"column":12},"end":{"row":24,"column":13},"action":"insert","lines":["e"]},{"start":{"row":24,"column":13},"end":{"row":24,"column":14},"action":"insert","lines":["n"]},{"start":{"row":24,"column":14},"end":{"row":24,"column":15},"action":"insert","lines":["n"]},{"start":{"row":24,"column":15},"end":{"row":24,"column":16},"action":"insert","lines":["i"]}],[{"start":{"row":24,"column":16},"end":{"row":24,"column":17},"action":"insert","lines":["e"],"id":331}],[{"start":{"row":26,"column":3},"end":{"row":27,"column":13},"action":"remove","lines":[" if c < 0:"," break"],"id":332},{"start":{"row":26,"column":2},"end":{"row":26,"column":3},"action":"remove","lines":[" "]},{"start":{"row":26,"column":1},"end":{"row":26,"column":2},"action":"remove","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":1},"action":"remove","lines":[" 
"]},{"start":{"row":25,"column":18},"end":{"row":26,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":1,"column":0},"end":{"row":2,"column":0},"action":"insert","lines":["",""],"id":333},{"start":{"row":2,"column":0},"end":{"row":3,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":2,"column":0},"end":{"row":2,"column":1},"action":"insert","lines":["d"],"id":334},{"start":{"row":2,"column":1},"end":{"row":2,"column":2},"action":"insert","lines":["e"]},{"start":{"row":2,"column":2},"end":{"row":2,"column":3},"action":"insert","lines":["f"]}],[{"start":{"row":2,"column":3},"end":{"row":2,"column":4},"action":"insert","lines":[" "],"id":335},{"start":{"row":2,"column":4},"end":{"row":2,"column":5},"action":"insert","lines":["m"]},{"start":{"row":2,"column":5},"end":{"row":2,"column":6},"action":"insert","lines":["a"]},{"start":{"row":2,"column":6},"end":{"row":2,"column":7},"action":"insert","lines":["i"]},{"start":{"row":2,"column":7},"end":{"row":2,"column":8},"action":"insert","lines":["n"]}],[{"start":{"row":2,"column":8},"end":{"row":2,"column":10},"action":"insert","lines":["()"],"id":336}],[{"start":{"row":2,"column":10},"end":{"row":2,"column":11},"action":"insert","lines":[":"],"id":337}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":4},"action":"insert","lines":[" "],"id":338}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "],"id":339}],[{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "],"id":340}],[{"start":{"row":7,"column":0},"end":{"row":7,"column":4},"action":"insert","lines":[" "],"id":341}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "],"id":342}],[{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "],"id":343}],[{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "],"id":344}],[{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "],"id":345}],[{"start":{"row":15,"column":4},"end":{"row":15,"column":8},"action":"insert","lines":[" "],"id":346}],[{"start":{"row":16,"column":4},"end":{"row":16,"column":8},"action":"insert","lines":[" "],"id":347}],[{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "],"id":348}],[{"start":{"row":19,"column":4},"end":{"row":19,"column":8},"action":"insert","lines":[" "],"id":349}],[{"start":{"row":20,"column":4},"end":{"row":20,"column":8},"action":"insert","lines":[" "],"id":350}],[{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "],"id":351}],[{"start":{"row":23,"column":4},"end":{"row":23,"column":8},"action":"insert","lines":[" "],"id":352}],[{"start":{"row":24,"column":4},"end":{"row":24,"column":8},"action":"insert","lines":[" "],"id":353}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":12},"action":"insert","lines":[" "],"id":354}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":12},"action":"remove","lines":[" "],"id":355}],[{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "],"id":356}],[{"start":{"row":27,"column":4},"end":{"row":27,"column":8},"action":"insert","lines":[" "],"id":357}],[{"start":{"row":28,"column":4},"end":{"row":28,"column":8},"action":"insert","lines":[" "],"id":358}],[{"start":{"row":30,"column":0},"end":{"row":30,"column":4},"action":"insert","lines":[" 
"],"id":359}],[{"start":{"row":30,"column":25},"end":{"row":31,"column":0},"action":"insert","lines":["",""],"id":360},{"start":{"row":31,"column":0},"end":{"row":31,"column":4},"action":"insert","lines":[" "]},{"start":{"row":31,"column":4},"end":{"row":32,"column":0},"action":"insert","lines":["",""]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "],"id":361}],[{"start":{"row":32,"column":0},"end":{"row":32,"column":1},"action":"insert","lines":["d"],"id":362},{"start":{"row":32,"column":1},"end":{"row":32,"column":2},"action":"insert","lines":["e"]},{"start":{"row":32,"column":2},"end":{"row":32,"column":3},"action":"insert","lines":["f"]}],[{"start":{"row":32,"column":3},"end":{"row":32,"column":4},"action":"insert","lines":[" "],"id":363}],[{"start":{"row":32,"column":4},"end":{"row":32,"column":5},"action":"insert","lines":["g"],"id":364},{"start":{"row":32,"column":5},"end":{"row":32,"column":6},"action":"insert","lines":["e"]},{"start":{"row":32,"column":6},"end":{"row":32,"column":7},"action":"insert","lines":["t"]},{"start":{"row":32,"column":7},"end":{"row":32,"column":8},"action":"insert","lines":["_"]},{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["p"]},{"start":{"row":32,"column":9},"end":{"row":32,"column":10},"action":"insert","lines":["o"]},{"start":{"row":32,"column":10},"end":{"row":32,"column":11},"action":"insert","lines":["s"]},{"start":{"row":32,"column":11},"end":{"row":32,"column":12},"action":"insert","lines":["i"]},{"start":{"row":32,"column":12},"end":{"row":32,"column":13},"action":"insert","lines":["t"]},{"start":{"row":32,"column":13},"end":{"row":32,"column":14},"action":"insert","lines":["i"]},{"start":{"row":32,"column":14},"end":{"row":32,"column":15},"action":"insert","lines":["v"]},{"start":{"row":32,"column":15},"end":{"row":32,"column":16},"action":"insert","lines":["e"]}],[{"start":{"row":32,"column":16},"end":{"row":32,"column":17},"action":"insert","lines":[" "],"id":365},{"start":{"row":32,"column":17},"end":{"row":32,"column":18},"action":"insert","lines":["c"]},{"start":{"row":32,"column":18},"end":{"row":32,"column":19},"action":"insert","lines":["h"]},{"start":{"row":32,"column":19},"end":{"row":32,"column":20},"action":"insert","lines":["a"]},{"start":{"row":32,"column":20},"end":{"row":32,"column":21},"action":"insert","lines":["n"]},{"start":{"row":32,"column":21},"end":{"row":32,"column":22},"action":"insert","lines":["g"]},{"start":{"row":32,"column":22},"end":{"row":32,"column":23},"action":"insert","lines":["e"]}],[{"start":{"row":32,"column":22},"end":{"row":32,"column":23},"action":"remove","lines":["e"],"id":366},{"start":{"row":32,"column":21},"end":{"row":32,"column":22},"action":"remove","lines":["g"]},{"start":{"row":32,"column":20},"end":{"row":32,"column":21},"action":"remove","lines":["n"]},{"start":{"row":32,"column":19},"end":{"row":32,"column":20},"action":"remove","lines":["a"]},{"start":{"row":32,"column":18},"end":{"row":32,"column":19},"action":"remove","lines":["h"]},{"start":{"row":32,"column":17},"end":{"row":32,"column":18},"action":"remove","lines":["c"]},{"start":{"row":32,"column":16},"end":{"row":32,"column":17},"action":"remove","lines":[" 
"]}],[{"start":{"row":32,"column":16},"end":{"row":32,"column":17},"action":"insert","lines":["_"],"id":367},{"start":{"row":32,"column":17},"end":{"row":32,"column":18},"action":"insert","lines":["c"]},{"start":{"row":32,"column":18},"end":{"row":32,"column":19},"action":"insert","lines":["h"]},{"start":{"row":32,"column":19},"end":{"row":32,"column":20},"action":"insert","lines":["a"]},{"start":{"row":32,"column":20},"end":{"row":32,"column":21},"action":"insert","lines":["n"]},{"start":{"row":32,"column":21},"end":{"row":32,"column":22},"action":"insert","lines":["g"]},{"start":{"row":32,"column":22},"end":{"row":32,"column":23},"action":"insert","lines":["e"]},{"start":{"row":32,"column":23},"end":{"row":32,"column":24},"action":"insert","lines":[":"]}],[{"start":{"row":32,"column":24},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":368},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":33,"column":4},"end":{"row":33,"column":8},"action":"insert","lines":[" "],"id":369}],[{"start":{"row":33,"column":4},"end":{"row":33,"column":8},"action":"remove","lines":[" "],"id":370}],[{"start":{"row":10,"column":13},"end":{"row":10,"column":39},"action":"remove","lines":["get_float(\"Change owed: \")"],"id":372}],[{"start":{"row":10,"column":13},"end":{"row":10,"column":14},"action":"insert","lines":["g"],"id":373},{"start":{"row":10,"column":14},"end":{"row":10,"column":15},"action":"insert","lines":["e"]},{"start":{"row":10,"column":15},"end":{"row":10,"column":16},"action":"insert","lines":["t"]},{"start":{"row":10,"column":16},"end":{"row":10,"column":17},"action":"insert","lines":["_"]},{"start":{"row":10,"column":17},"end":{"row":10,"column":18},"action":"insert","lines":["p"]},{"start":{"row":10,"column":18},"end":{"row":10,"column":19},"action":"insert","lines":["o"]},{"start":{"row":10,"column":19},"end":{"row":10,"column":20},"action":"insert","lines":["s"]},{"start":{"row":10,"column":20},"end":{"row":10,"column":21},"action":"insert","lines":["i"]},{"start":{"row":10,"column":21},"end":{"row":10,"column":22},"action":"insert","lines":["t"]},{"start":{"row":10,"column":22},"end":{"row":10,"column":23},"action":"insert","lines":["i"]},{"start":{"row":10,"column":23},"end":{"row":10,"column":24},"action":"insert","lines":["v"]}],[{"start":{"row":10,"column":24},"end":{"row":10,"column":25},"action":"insert","lines":["e"],"id":374}],[{"start":{"row":10,"column":25},"end":{"row":10,"column":26},"action":"insert","lines":["_"],"id":375},{"start":{"row":10,"column":26},"end":{"row":10,"column":27},"action":"insert","lines":["c"]},{"start":{"row":10,"column":27},"end":{"row":10,"column":28},"action":"insert","lines":["h"]},{"start":{"row":10,"column":28},"end":{"row":10,"column":29},"action":"insert","lines":["a"]},{"start":{"row":10,"column":29},"end":{"row":10,"column":30},"action":"insert","lines":["n"]},{"start":{"row":10,"column":30},"end":{"row":10,"column":31},"action":"insert","lines":["g"]},{"start":{"row":10,"column":31},"end":{"row":10,"column":32},"action":"insert","lines":["e"]}],[{"start":{"row":10,"column":32},"end":{"row":10,"column":34},"action":"insert","lines":["()"],"id":376}],[{"start":{"row":32,"column":24},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":377},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":33,"column":4},"end":{"row":33,"column":30},"action":"insert","lines":["get_float(\"Change owed: 
\")"],"id":378}],[{"start":{"row":32,"column":24},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":379},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"insert","lines":[" "]},{"start":{"row":33,"column":4},"end":{"row":33,"column":5},"action":"insert","lines":["w"]},{"start":{"row":33,"column":5},"end":{"row":33,"column":6},"action":"insert","lines":["h"]},{"start":{"row":33,"column":6},"end":{"row":33,"column":7},"action":"insert","lines":["i"]},{"start":{"row":33,"column":7},"end":{"row":33,"column":8},"action":"insert","lines":["l"]},{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"insert","lines":["e"]}],[{"start":{"row":33,"column":9},"end":{"row":33,"column":10},"action":"insert","lines":[" "],"id":380}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"insert","lines":[" "],"id":381}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["c"],"id":382}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["c"],"id":383}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["c"],"id":384},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["h"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["a"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["n"]},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["g"]},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["e"]}],[{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"remove","lines":["e"],"id":385},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"remove","lines":["g"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"remove","lines":["n"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"remove","lines":["a"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"remove","lines":["h"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["c"]}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["r"],"id":386},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["e"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["s"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["p"]},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["o"]},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["n"]},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"insert","lines":["s"]},{"start":{"row":34,"column":15},"end":{"row":34,"column":16},"action":"insert","lines":["e"]}],[{"start":{"row":34,"column":15},"end":{"row":34,"column":16},"action":"remove","lines":["e"],"id":387},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"remove","lines":["s"]},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"remove","lines":["n"]},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"remove","lines":["o"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"remove","lines":["p"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"remove","lin
es":["s"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"remove","lines":["e"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["r"]}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["r"],"id":388},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["e"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["s"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["p"]},{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["o"]},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["n"]},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"insert","lines":["s"]},{"start":{"row":34,"column":15},"end":{"row":34,"column":16},"action":"insert","lines":["e"]}],[{"start":{"row":34,"column":16},"end":{"row":34,"column":17},"action":"insert","lines":[" "],"id":389},{"start":{"row":34,"column":17},"end":{"row":34,"column":18},"action":"insert","lines":["="]}],[{"start":{"row":34,"column":18},"end":{"row":34,"column":19},"action":"insert","lines":[" "],"id":390}],[{"start":{"row":33,"column":10},"end":{"row":33,"column":11},"action":"insert","lines":["r"],"id":391},{"start":{"row":33,"column":11},"end":{"row":33,"column":12},"action":"insert","lines":["e"]},{"start":{"row":33,"column":12},"end":{"row":33,"column":13},"action":"insert","lines":["s"]},{"start":{"row":33,"column":13},"end":{"row":33,"column":14},"action":"insert","lines":["p"]},{"start":{"row":33,"column":14},"end":{"row":33,"column":15},"action":"insert","lines":["o"]},{"start":{"row":33,"column":15},"end":{"row":33,"column":16},"action":"insert","lines":["n"]},{"start":{"row":33,"column":16},"end":{"row":33,"column":17},"action":"insert","lines":["s"]},{"start":{"row":33,"column":17},"end":{"row":33,"column":18},"action":"insert","lines":["e"]}],[{"start":{"row":33,"column":18},"end":{"row":33,"column":19},"action":"insert","lines":[" "],"id":392},{"start":{"row":33,"column":19},"end":{"row":33,"column":20},"action":"insert","lines":["<"]}],[{"start":{"row":33,"column":20},"end":{"row":33,"column":21},"action":"insert","lines":[" "],"id":393}],[{"start":{"row":33,"column":21},"end":{"row":33,"column":22},"action":"insert","lines":["0"],"id":394}],[{"start":{"row":33,"column":22},"end":{"row":33,"column":23},"action":"insert","lines":[":"],"id":395}],[{"start":{"row":33,"column":4},"end":{"row":34,"column":45},"action":"remove","lines":["while response < 0:"," response = get_float(\"Change owed: \")"],"id":396},{"start":{"row":33,"column":4},"end":{"row":37,"column":12},"action":"insert","lines":["while True:"," n = get_int(\"Height: \")"," if n > 0 and n <= 8:"," break"," return n"]}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":28},"action":"remove","lines":["and n <= 8:"],"id":397},{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"remove","lines":[" "]}],[{"start":{"row":32,"column":23},"end":{"row":32,"column":24},"action":"insert","lines":["("],"id":398},{"start":{"row":32,"column":24},"end":{"row":32,"column":25},"action":"insert","lines":[")"]}],[{"start":{"row":38,"column":4},"end":{"row":39,"column":0},"action":"insert","lines":["",""],"id":399},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":39,"column":4},"end":{"row":40,"column":0},"action":"insert","lines":["",""]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "],"id":400}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":1},"action":"insert","lines":["m"],"id":401}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":1},"action":"remove","lines":["m"],"id":402},{"start":{"row":39,"column":4},"end":{"row":40,"column":0},"action":"remove","lines":["",""]},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["m"],"id":403},{"start":{"row":39,"column":1},"end":{"row":39,"column":2},"action":"insert","lines":["a"]},{"start":{"row":39,"column":2},"end":{"row":39,"column":3},"action":"insert","lines":["i"]},{"start":{"row":39,"column":3},"end":{"row":39,"column":4},"action":"insert","lines":["n"]}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":6},"action":"insert","lines":["()"],"id":404}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":[":"],"id":405}],[{"start":{"row":34,"column":16},"end":{"row":34,"column":19},"action":"remove","lines":["int"],"id":406},{"start":{"row":34,"column":16},"end":{"row":34,"column":17},"action":"insert","lines":["f"]},{"start":{"row":34,"column":17},"end":{"row":34,"column":18},"action":"insert","lines":["l"]},{"start":{"row":34,"column":18},"end":{"row":34,"column":19},"action":"insert","lines":["o"]},{"start":{"row":34,"column":19},"end":{"row":34,"column":20},"action":"insert","lines":["a"]},{"start":{"row":34,"column":20},"end":{"row":34,"column":21},"action":"insert","lines":["t"]}],[{"start":{"row":34,"column":23},"end":{"row":34,"column":29},"action":"remove","lines":["Height"],"id":407},{"start":{"row":34,"column":23},"end":{"row":34,"column":24},"action":"insert","lines":["C"]},{"start":{"row":34,"column":24},"end":{"row":34,"column":25},"action":"insert","lines":["h"]},{"start":{"row":34,"column":25},"end":{"row":34,"column":26},"action":"insert","lines":["a"]},{"start":{"row":34,"column":26},"end":{"row":34,"column":27},"action":"insert","lines":["n"]},{"start":{"row":34,"column":27},"end":{"row":34,"column":28},"action":"insert","lines":["g"]},{"start":{"row":34,"column":28},"end":{"row":34,"column":29},"action":"insert","lines":["e"]}],[{"start":{"row":34,"column":29},"end":{"row":34,"column":30},"action":"insert","lines":[" "],"id":408},{"start":{"row":34,"column":30},"end":{"row":34,"column":31},"action":"insert","lines":["o"]},{"start":{"row":34,"column":31},"end":{"row":34,"column":32},"action":"insert","lines":["w"]},{"start":{"row":34,"column":32},"end":{"row":34,"column":33},"action":"insert","lines":["e"]},{"start":{"row":34,"column":33},"end":{"row":34,"column":34},"action":"insert","lines":["d"]}],[{"start":{"row":35,"column":14},"end":{"row":35,"column":15},"action":"insert","lines":["="],"id":409}]]},"ace":{"folds":[],"scrolltop":439.5,"scrollleft":0,"selection":{"start":{"row":35,"column":15},"end":{"row":35,"column":15},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":26,"state":"start","mode":"ace/mode/python"}},"timestamp":1595063222568,"hash":"5e254036c6adabee2cc8fd34ca538d47c56fc31e"}
|
normal
|
{
"blob_id": "d14c22ba6db90a93a19d61e105e31b3eb8f3a206",
"index": 1706,
"step-1": "<mask token>\n",
"step-2": "{'filter': false, 'title': 'cash.py', 'tooltip': '/pset6/cash/cash.py',\n 'undoManager': {'mark': 100, 'position': 100, 'stack': [[{'start': {\n 'row': 12, 'column': 7}, 'end': {'row': 12, 'column': 8}, 'action':\n 'insert', 'lines': [' '], 'id': 308}, {'start': {'row': 12, 'column': 8\n }, 'end': {'row': 12, 'column': 9}, 'action': 'insert', 'lines': ['>']}\n ], [{'start': {'row': 12, 'column': 9}, 'end': {'row': 12, 'column': 10\n }, 'action': 'insert', 'lines': [' '], 'id': 309}], [{'start': {'row': \n 12, 'column': 9}, 'end': {'row': 12, 'column': 10}, 'action': 'remove',\n 'lines': [' '], 'id': 310}], [{'start': {'row': 12, 'column': 9}, 'end':\n {'row': 12, 'column': 10}, 'action': 'insert', 'lines': ['='], 'id': \n 311}], [{'start': {'row': 12, 'column': 10}, 'end': {'row': 12,\n 'column': 11}, 'action': 'insert', 'lines': [' '], 'id': 312}, {'start':\n {'row': 12, 'column': 11}, 'end': {'row': 12, 'column': 12}, 'action':\n 'insert', 'lines': ['q']}, {'start': {'row': 12, 'column': 12}, 'end':\n {'row': 12, 'column': 13}, 'action': 'insert', 'lines': ['u']}, {\n 'start': {'row': 12, 'column': 13}, 'end': {'row': 12, 'column': 14},\n 'action': 'insert', 'lines': ['a']}, {'start': {'row': 12, 'column': 14\n }, 'end': {'row': 12, 'column': 15}, 'action': 'insert', 'lines': ['r']\n }, {'start': {'row': 12, 'column': 15}, 'end': {'row': 12, 'column': 16\n }, 'action': 'insert', 'lines': ['t']}, {'start': {'row': 12, 'column':\n 16}, 'end': {'row': 12, 'column': 17}, 'action': 'insert', 'lines': [\n 'e']}, {'start': {'row': 12, 'column': 17}, 'end': {'row': 12, 'column':\n 18}, 'action': 'insert', 'lines': ['r']}], [{'start': {'row': 14,\n 'column': 0}, 'end': {'row': 15, 'column': 13}, 'action': 'remove',\n 'lines': [' if c < quarter:', ' break'], 'id': 313}, {'start':\n {'row': 13, 'column': 19}, 'end': {'row': 14, 'column': 0}, 'action':\n 'remove', 'lines': ['', '']}], [{'start': {'row': 16, 'column': 6},\n 'end': {'row': 16, 'column': 10}, 'action': 'remove', 'lines': ['True'],\n 'id': 314}, {'start': {'row': 16, 'column': 6}, 'end': {'row': 16,\n 'column': 7}, 'action': 'insert', 'lines': ['c']}, {'start': {'row': 16,\n 'column': 7}, 'end': {'row': 16, 'column': 8}, 'action': 'insert',\n 'lines': ['>']}], [{'start': {'row': 16, 'column': 8}, 'end': {'row': \n 16, 'column': 9}, 'action': 'insert', 'lines': ['='], 'id': 315}], [{\n 'start': {'row': 16, 'column': 9}, 'end': {'row': 16, 'column': 10},\n 'action': 'insert', 'lines': [' '], 'id': 316}, {'start': {'row': 16,\n 'column': 10}, 'end': {'row': 16, 'column': 11}, 'action': 'insert',\n 'lines': ['d']}, {'start': {'row': 16, 'column': 11}, 'end': {'row': 16,\n 'column': 12}, 'action': 'insert', 'lines': ['i']}, {'start': {'row': \n 16, 'column': 12}, 'end': {'row': 16, 'column': 13}, 'action': 'insert',\n 'lines': ['m']}, {'start': {'row': 16, 'column': 13}, 'end': {'row': 16,\n 'column': 14}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row':\n 16, 'column': 7}, 'end': {'row': 16, 'column': 8}, 'action': 'insert',\n 'lines': [' '], 'id': 317}], [{'start': {'row': 18, 'column': 0}, 'end':\n {'row': 19, 'column': 13}, 'action': 'remove', 'lines': [\n ' if c < dime:', ' break'], 'id': 318}, {'start': {'row': 17,\n 'column': 16}, 'end': {'row': 18, 'column': 0}, 'action': 'remove',\n 'lines': ['', '']}], [{'start': {'row': 20, 'column': 6}, 'end': {'row':\n 20, 'column': 10}, 'action': 'remove', 'lines': ['True'], 'id': 319}],\n [{'start': {'row': 20, 'column': 6}, 'end': {'row': 20, 'column': 7},\n 'action': 
'insert', 'lines': ['c'], 'id': 320}], [{'start': {'row': 20,\n 'column': 7}, 'end': {'row': 20, 'column': 8}, 'action': 'insert',\n 'lines': ['>'], 'id': 321}], [{'start': {'row': 20, 'column': 7}, 'end':\n {'row': 20, 'column': 8}, 'action': 'remove', 'lines': ['>'], 'id': 322\n }], [{'start': {'row': 20, 'column': 7}, 'end': {'row': 20, 'column': 8\n }, 'action': 'insert', 'lines': [' '], 'id': 323}, {'start': {'row': 20,\n 'column': 8}, 'end': {'row': 20, 'column': 9}, 'action': 'insert',\n 'lines': ['>']}], [{'start': {'row': 20, 'column': 9}, 'end': {'row': \n 20, 'column': 10}, 'action': 'insert', 'lines': ['='], 'id': 324}], [{\n 'start': {'row': 20, 'column': 10}, 'end': {'row': 20, 'column': 11},\n 'action': 'insert', 'lines': [' '], 'id': 325}, {'start': {'row': 20,\n 'column': 11}, 'end': {'row': 20, 'column': 12}, 'action': 'insert',\n 'lines': ['n']}, {'start': {'row': 20, 'column': 12}, 'end': {'row': 20,\n 'column': 13}, 'action': 'insert', 'lines': ['i']}, {'start': {'row': \n 20, 'column': 13}, 'end': {'row': 20, 'column': 14}, 'action': 'insert',\n 'lines': ['c']}, {'start': {'row': 20, 'column': 14}, 'end': {'row': 20,\n 'column': 15}, 'action': 'insert', 'lines': ['k']}, {'start': {'row': \n 20, 'column': 15}, 'end': {'row': 20, 'column': 16}, 'action': 'insert',\n 'lines': ['e']}, {'start': {'row': 20, 'column': 16}, 'end': {'row': 20,\n 'column': 17}, 'action': 'insert', 'lines': ['l']}], [{'start': {'row':\n 22, 'column': 1}, 'end': {'row': 23, 'column': 13}, 'action': 'remove',\n 'lines': [' if c < nickel:', ' break'], 'id': 326}, {'start':\n {'row': 22, 'column': 0}, 'end': {'row': 22, 'column': 1}, 'action':\n 'remove', 'lines': [' ']}, {'start': {'row': 21, 'column': 18}, 'end':\n {'row': 22, 'column': 0}, 'action': 'remove', 'lines': ['', '']}], [{\n 'start': {'row': 24, 'column': 9}, 'end': {'row': 24, 'column': 10},\n 'action': 'remove', 'lines': ['e'], 'id': 327}, {'start': {'row': 24,\n 'column': 8}, 'end': {'row': 24, 'column': 9}, 'action': 'remove',\n 'lines': ['u']}, {'start': {'row': 24, 'column': 7}, 'end': {'row': 24,\n 'column': 8}, 'action': 'remove', 'lines': ['r']}, {'start': {'row': 24,\n 'column': 6}, 'end': {'row': 24, 'column': 7}, 'action': 'remove',\n 'lines': ['T']}], [{'start': {'row': 24, 'column': 6}, 'end': {'row': \n 24, 'column': 7}, 'action': 'insert', 'lines': ['c'], 'id': 328}], [{\n 'start': {'row': 24, 'column': 7}, 'end': {'row': 24, 'column': 8},\n 'action': 'insert', 'lines': [' '], 'id': 329}, {'start': {'row': 24,\n 'column': 8}, 'end': {'row': 24, 'column': 9}, 'action': 'insert',\n 'lines': ['>']}, {'start': {'row': 24, 'column': 9}, 'end': {'row': 24,\n 'column': 10}, 'action': 'insert', 'lines': ['=']}], [{'start': {'row':\n 24, 'column': 10}, 'end': {'row': 24, 'column': 11}, 'action': 'insert',\n 'lines': [' '], 'id': 330}, {'start': {'row': 24, 'column': 11}, 'end':\n {'row': 24, 'column': 12}, 'action': 'insert', 'lines': ['p']}, {\n 'start': {'row': 24, 'column': 12}, 'end': {'row': 24, 'column': 13},\n 'action': 'insert', 'lines': ['e']}, {'start': {'row': 24, 'column': 13\n }, 'end': {'row': 24, 'column': 14}, 'action': 'insert', 'lines': ['n']\n }, {'start': {'row': 24, 'column': 14}, 'end': {'row': 24, 'column': 15\n }, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 24, 'column':\n 15}, 'end': {'row': 24, 'column': 16}, 'action': 'insert', 'lines': [\n 'i']}], [{'start': {'row': 24, 'column': 16}, 'end': {'row': 24,\n 'column': 17}, 'action': 'insert', 'lines': ['e'], 'id': 331}], [{\n 'start': {'row': 
26, 'column': 3}, 'end': {'row': 27, 'column': 13},\n 'action': 'remove', 'lines': [' if c < 0:', ' break'], 'id': 332\n }, {'start': {'row': 26, 'column': 2}, 'end': {'row': 26, 'column': 3},\n 'action': 'remove', 'lines': [' ']}, {'start': {'row': 26, 'column': 1},\n 'end': {'row': 26, 'column': 2}, 'action': 'remove', 'lines': [' ']}, {\n 'start': {'row': 26, 'column': 0}, 'end': {'row': 26, 'column': 1},\n 'action': 'remove', 'lines': [' ']}, {'start': {'row': 25, 'column': 18\n }, 'end': {'row': 26, 'column': 0}, 'action': 'remove', 'lines': ['',\n '']}], [{'start': {'row': 1, 'column': 0}, 'end': {'row': 2, 'column': \n 0}, 'action': 'insert', 'lines': ['', ''], 'id': 333}, {'start': {'row':\n 2, 'column': 0}, 'end': {'row': 3, 'column': 0}, 'action': 'insert',\n 'lines': ['', '']}], [{'start': {'row': 2, 'column': 0}, 'end': {'row':\n 2, 'column': 1}, 'action': 'insert', 'lines': ['d'], 'id': 334}, {\n 'start': {'row': 2, 'column': 1}, 'end': {'row': 2, 'column': 2},\n 'action': 'insert', 'lines': ['e']}, {'start': {'row': 2, 'column': 2},\n 'end': {'row': 2, 'column': 3}, 'action': 'insert', 'lines': ['f']}], [\n {'start': {'row': 2, 'column': 3}, 'end': {'row': 2, 'column': 4},\n 'action': 'insert', 'lines': [' '], 'id': 335}, {'start': {'row': 2,\n 'column': 4}, 'end': {'row': 2, 'column': 5}, 'action': 'insert',\n 'lines': ['m']}, {'start': {'row': 2, 'column': 5}, 'end': {'row': 2,\n 'column': 6}, 'action': 'insert', 'lines': ['a']}, {'start': {'row': 2,\n 'column': 6}, 'end': {'row': 2, 'column': 7}, 'action': 'insert',\n 'lines': ['i']}, {'start': {'row': 2, 'column': 7}, 'end': {'row': 2,\n 'column': 8}, 'action': 'insert', 'lines': ['n']}], [{'start': {'row': \n 2, 'column': 8}, 'end': {'row': 2, 'column': 10}, 'action': 'insert',\n 'lines': ['()'], 'id': 336}], [{'start': {'row': 2, 'column': 10},\n 'end': {'row': 2, 'column': 11}, 'action': 'insert', 'lines': [':'],\n 'id': 337}], [{'start': {'row': 4, 'column': 0}, 'end': {'row': 4,\n 'column': 4}, 'action': 'insert', 'lines': [' '], 'id': 338}], [{\n 'start': {'row': 5, 'column': 0}, 'end': {'row': 5, 'column': 4},\n 'action': 'insert', 'lines': [' '], 'id': 339}], [{'start': {'row': \n 6, 'column': 0}, 'end': {'row': 6, 'column': 4}, 'action': 'insert',\n 'lines': [' '], 'id': 340}], [{'start': {'row': 7, 'column': 0},\n 'end': {'row': 7, 'column': 4}, 'action': 'insert', 'lines': [' '],\n 'id': 341}], [{'start': {'row': 8, 'column': 0}, 'end': {'row': 8,\n 'column': 4}, 'action': 'insert', 'lines': [' '], 'id': 342}], [{\n 'start': {'row': 10, 'column': 0}, 'end': {'row': 10, 'column': 4},\n 'action': 'insert', 'lines': [' '], 'id': 343}], [{'start': {'row': \n 12, 'column': 0}, 'end': {'row': 12, 'column': 4}, 'action': 'insert',\n 'lines': [' '], 'id': 344}], [{'start': {'row': 14, 'column': 0},\n 'end': {'row': 14, 'column': 4}, 'action': 'insert', 'lines': [' '],\n 'id': 345}], [{'start': {'row': 15, 'column': 4}, 'end': {'row': 15,\n 'column': 8}, 'action': 'insert', 'lines': [' '], 'id': 346}], [{\n 'start': {'row': 16, 'column': 4}, 'end': {'row': 16, 'column': 8},\n 'action': 'insert', 'lines': [' '], 'id': 347}], [{'start': {'row': \n 18, 'column': 0}, 'end': {'row': 18, 'column': 4}, 'action': 'insert',\n 'lines': [' '], 'id': 348}], [{'start': {'row': 19, 'column': 4},\n 'end': {'row': 19, 'column': 8}, 'action': 'insert', 'lines': [' '],\n 'id': 349}], [{'start': {'row': 20, 'column': 4}, 'end': {'row': 20,\n 'column': 8}, 'action': 'insert', 'lines': [' '], 'id': 350}], [{\n 'start': {'row': 22, 'column': 
0}, 'end': {'row': 22, 'column': 4},\n 'action': 'insert', 'lines': [' '], 'id': 351}], [{'start': {'row': \n 23, 'column': 4}, 'end': {'row': 23, 'column': 8}, 'action': 'insert',\n 'lines': [' '], 'id': 352}], [{'start': {'row': 24, 'column': 4},\n 'end': {'row': 24, 'column': 8}, 'action': 'insert', 'lines': [' '],\n 'id': 353}], [{'start': {'row': 24, 'column': 8}, 'end': {'row': 24,\n 'column': 12}, 'action': 'insert', 'lines': [' '], 'id': 354}], [{\n 'start': {'row': 24, 'column': 8}, 'end': {'row': 24, 'column': 12},\n 'action': 'remove', 'lines': [' '], 'id': 355}], [{'start': {'row': \n 26, 'column': 0}, 'end': {'row': 26, 'column': 4}, 'action': 'insert',\n 'lines': [' '], 'id': 356}], [{'start': {'row': 27, 'column': 4},\n 'end': {'row': 27, 'column': 8}, 'action': 'insert', 'lines': [' '],\n 'id': 357}], [{'start': {'row': 28, 'column': 4}, 'end': {'row': 28,\n 'column': 8}, 'action': 'insert', 'lines': [' '], 'id': 358}], [{\n 'start': {'row': 30, 'column': 0}, 'end': {'row': 30, 'column': 4},\n 'action': 'insert', 'lines': [' '], 'id': 359}], [{'start': {'row': \n 30, 'column': 25}, 'end': {'row': 31, 'column': 0}, 'action': 'insert',\n 'lines': ['', ''], 'id': 360}, {'start': {'row': 31, 'column': 0},\n 'end': {'row': 31, 'column': 4}, 'action': 'insert', 'lines': [' ']},\n {'start': {'row': 31, 'column': 4}, 'end': {'row': 32, 'column': 0},\n 'action': 'insert', 'lines': ['', '']}, {'start': {'row': 32, 'column':\n 0}, 'end': {'row': 32, 'column': 4}, 'action': 'insert', 'lines': [\n ' ']}], [{'start': {'row': 32, 'column': 0}, 'end': {'row': 32,\n 'column': 4}, 'action': 'remove', 'lines': [' '], 'id': 361}], [{\n 'start': {'row': 32, 'column': 0}, 'end': {'row': 32, 'column': 1},\n 'action': 'insert', 'lines': ['d'], 'id': 362}, {'start': {'row': 32,\n 'column': 1}, 'end': {'row': 32, 'column': 2}, 'action': 'insert',\n 'lines': ['e']}, {'start': {'row': 32, 'column': 2}, 'end': {'row': 32,\n 'column': 3}, 'action': 'insert', 'lines': ['f']}], [{'start': {'row': \n 32, 'column': 3}, 'end': {'row': 32, 'column': 4}, 'action': 'insert',\n 'lines': [' '], 'id': 363}], [{'start': {'row': 32, 'column': 4}, 'end':\n {'row': 32, 'column': 5}, 'action': 'insert', 'lines': ['g'], 'id': 364\n }, {'start': {'row': 32, 'column': 5}, 'end': {'row': 32, 'column': 6},\n 'action': 'insert', 'lines': ['e']}, {'start': {'row': 32, 'column': 6},\n 'end': {'row': 32, 'column': 7}, 'action': 'insert', 'lines': ['t']}, {\n 'start': {'row': 32, 'column': 7}, 'end': {'row': 32, 'column': 8},\n 'action': 'insert', 'lines': ['_']}, {'start': {'row': 32, 'column': 8},\n 'end': {'row': 32, 'column': 9}, 'action': 'insert', 'lines': ['p']}, {\n 'start': {'row': 32, 'column': 9}, 'end': {'row': 32, 'column': 10},\n 'action': 'insert', 'lines': ['o']}, {'start': {'row': 32, 'column': 10\n }, 'end': {'row': 32, 'column': 11}, 'action': 'insert', 'lines': ['s']\n }, {'start': {'row': 32, 'column': 11}, 'end': {'row': 32, 'column': 12\n }, 'action': 'insert', 'lines': ['i']}, {'start': {'row': 32, 'column':\n 12}, 'end': {'row': 32, 'column': 13}, 'action': 'insert', 'lines': [\n 't']}, {'start': {'row': 32, 'column': 13}, 'end': {'row': 32, 'column':\n 14}, 'action': 'insert', 'lines': ['i']}, {'start': {'row': 32,\n 'column': 14}, 'end': {'row': 32, 'column': 15}, 'action': 'insert',\n 'lines': ['v']}, {'start': {'row': 32, 'column': 15}, 'end': {'row': 32,\n 'column': 16}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row':\n 32, 'column': 16}, 'end': {'row': 32, 'column': 17}, 'action': 
'insert',\n 'lines': [' '], 'id': 365}, {'start': {'row': 32, 'column': 17}, 'end':\n {'row': 32, 'column': 18}, 'action': 'insert', 'lines': ['c']}, {\n 'start': {'row': 32, 'column': 18}, 'end': {'row': 32, 'column': 19},\n 'action': 'insert', 'lines': ['h']}, {'start': {'row': 32, 'column': 19\n }, 'end': {'row': 32, 'column': 20}, 'action': 'insert', 'lines': ['a']\n }, {'start': {'row': 32, 'column': 20}, 'end': {'row': 32, 'column': 21\n }, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 32, 'column':\n 21}, 'end': {'row': 32, 'column': 22}, 'action': 'insert', 'lines': [\n 'g']}, {'start': {'row': 32, 'column': 22}, 'end': {'row': 32, 'column':\n 23}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row': 32,\n 'column': 22}, 'end': {'row': 32, 'column': 23}, 'action': 'remove',\n 'lines': ['e'], 'id': 366}, {'start': {'row': 32, 'column': 21}, 'end':\n {'row': 32, 'column': 22}, 'action': 'remove', 'lines': ['g']}, {\n 'start': {'row': 32, 'column': 20}, 'end': {'row': 32, 'column': 21},\n 'action': 'remove', 'lines': ['n']}, {'start': {'row': 32, 'column': 19\n }, 'end': {'row': 32, 'column': 20}, 'action': 'remove', 'lines': ['a']\n }, {'start': {'row': 32, 'column': 18}, 'end': {'row': 32, 'column': 19\n }, 'action': 'remove', 'lines': ['h']}, {'start': {'row': 32, 'column':\n 17}, 'end': {'row': 32, 'column': 18}, 'action': 'remove', 'lines': [\n 'c']}, {'start': {'row': 32, 'column': 16}, 'end': {'row': 32, 'column':\n 17}, 'action': 'remove', 'lines': [' ']}], [{'start': {'row': 32,\n 'column': 16}, 'end': {'row': 32, 'column': 17}, 'action': 'insert',\n 'lines': ['_'], 'id': 367}, {'start': {'row': 32, 'column': 17}, 'end':\n {'row': 32, 'column': 18}, 'action': 'insert', 'lines': ['c']}, {\n 'start': {'row': 32, 'column': 18}, 'end': {'row': 32, 'column': 19},\n 'action': 'insert', 'lines': ['h']}, {'start': {'row': 32, 'column': 19\n }, 'end': {'row': 32, 'column': 20}, 'action': 'insert', 'lines': ['a']\n }, {'start': {'row': 32, 'column': 20}, 'end': {'row': 32, 'column': 21\n }, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 32, 'column':\n 21}, 'end': {'row': 32, 'column': 22}, 'action': 'insert', 'lines': [\n 'g']}, {'start': {'row': 32, 'column': 22}, 'end': {'row': 32, 'column':\n 23}, 'action': 'insert', 'lines': ['e']}, {'start': {'row': 32,\n 'column': 23}, 'end': {'row': 32, 'column': 24}, 'action': 'insert',\n 'lines': [':']}], [{'start': {'row': 32, 'column': 24}, 'end': {'row': \n 33, 'column': 0}, 'action': 'insert', 'lines': ['', ''], 'id': 368}, {\n 'start': {'row': 33, 'column': 0}, 'end': {'row': 33, 'column': 4},\n 'action': 'insert', 'lines': [' ']}], [{'start': {'row': 33,\n 'column': 4}, 'end': {'row': 33, 'column': 8}, 'action': 'insert',\n 'lines': [' '], 'id': 369}], [{'start': {'row': 33, 'column': 4},\n 'end': {'row': 33, 'column': 8}, 'action': 'remove', 'lines': [' '],\n 'id': 370}], [{'start': {'row': 10, 'column': 13}, 'end': {'row': 10,\n 'column': 39}, 'action': 'remove', 'lines': [\n 'get_float(\"Change owed: \")'], 'id': 372}], [{'start': {'row': 10,\n 'column': 13}, 'end': {'row': 10, 'column': 14}, 'action': 'insert',\n 'lines': ['g'], 'id': 373}, {'start': {'row': 10, 'column': 14}, 'end':\n {'row': 10, 'column': 15}, 'action': 'insert', 'lines': ['e']}, {\n 'start': {'row': 10, 'column': 15}, 'end': {'row': 10, 'column': 16},\n 'action': 'insert', 'lines': ['t']}, {'start': {'row': 10, 'column': 16\n }, 'end': {'row': 10, 'column': 17}, 'action': 'insert', 'lines': ['_']\n }, {'start': {'row': 10, 'column': 17}, 
'end': {'row': 10, 'column': 18\n }, 'action': 'insert', 'lines': ['p']}, {'start': {'row': 10, 'column':\n 18}, 'end': {'row': 10, 'column': 19}, 'action': 'insert', 'lines': [\n 'o']}, {'start': {'row': 10, 'column': 19}, 'end': {'row': 10, 'column':\n 20}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': 10,\n 'column': 20}, 'end': {'row': 10, 'column': 21}, 'action': 'insert',\n 'lines': ['i']}, {'start': {'row': 10, 'column': 21}, 'end': {'row': 10,\n 'column': 22}, 'action': 'insert', 'lines': ['t']}, {'start': {'row': \n 10, 'column': 22}, 'end': {'row': 10, 'column': 23}, 'action': 'insert',\n 'lines': ['i']}, {'start': {'row': 10, 'column': 23}, 'end': {'row': 10,\n 'column': 24}, 'action': 'insert', 'lines': ['v']}], [{'start': {'row':\n 10, 'column': 24}, 'end': {'row': 10, 'column': 25}, 'action': 'insert',\n 'lines': ['e'], 'id': 374}], [{'start': {'row': 10, 'column': 25},\n 'end': {'row': 10, 'column': 26}, 'action': 'insert', 'lines': ['_'],\n 'id': 375}, {'start': {'row': 10, 'column': 26}, 'end': {'row': 10,\n 'column': 27}, 'action': 'insert', 'lines': ['c']}, {'start': {'row': \n 10, 'column': 27}, 'end': {'row': 10, 'column': 28}, 'action': 'insert',\n 'lines': ['h']}, {'start': {'row': 10, 'column': 28}, 'end': {'row': 10,\n 'column': 29}, 'action': 'insert', 'lines': ['a']}, {'start': {'row': \n 10, 'column': 29}, 'end': {'row': 10, 'column': 30}, 'action': 'insert',\n 'lines': ['n']}, {'start': {'row': 10, 'column': 30}, 'end': {'row': 10,\n 'column': 31}, 'action': 'insert', 'lines': ['g']}, {'start': {'row': \n 10, 'column': 31}, 'end': {'row': 10, 'column': 32}, 'action': 'insert',\n 'lines': ['e']}], [{'start': {'row': 10, 'column': 32}, 'end': {'row': \n 10, 'column': 34}, 'action': 'insert', 'lines': ['()'], 'id': 376}], [{\n 'start': {'row': 32, 'column': 24}, 'end': {'row': 33, 'column': 0},\n 'action': 'insert', 'lines': ['', ''], 'id': 377}, {'start': {'row': 33,\n 'column': 0}, 'end': {'row': 33, 'column': 4}, 'action': 'insert',\n 'lines': [' ']}], [{'start': {'row': 33, 'column': 4}, 'end': {'row':\n 33, 'column': 30}, 'action': 'insert', 'lines': [\n 'get_float(\"Change owed: \")'], 'id': 378}], [{'start': {'row': 32,\n 'column': 24}, 'end': {'row': 33, 'column': 0}, 'action': 'insert',\n 'lines': ['', ''], 'id': 379}, {'start': {'row': 33, 'column': 0},\n 'end': {'row': 33, 'column': 4}, 'action': 'insert', 'lines': [' ']},\n {'start': {'row': 33, 'column': 4}, 'end': {'row': 33, 'column': 5},\n 'action': 'insert', 'lines': ['w']}, {'start': {'row': 33, 'column': 5},\n 'end': {'row': 33, 'column': 6}, 'action': 'insert', 'lines': ['h']}, {\n 'start': {'row': 33, 'column': 6}, 'end': {'row': 33, 'column': 7},\n 'action': 'insert', 'lines': ['i']}, {'start': {'row': 33, 'column': 7},\n 'end': {'row': 33, 'column': 8}, 'action': 'insert', 'lines': ['l']}, {\n 'start': {'row': 33, 'column': 8}, 'end': {'row': 33, 'column': 9},\n 'action': 'insert', 'lines': ['e']}], [{'start': {'row': 33, 'column': \n 9}, 'end': {'row': 33, 'column': 10}, 'action': 'insert', 'lines': [' '\n ], 'id': 380}], [{'start': {'row': 34, 'column': 4}, 'end': {'row': 34,\n 'column': 8}, 'action': 'insert', 'lines': [' '], 'id': 381}], [{\n 'start': {'row': 34, 'column': 8}, 'end': {'row': 34, 'column': 9},\n 'action': 'insert', 'lines': ['c'], 'id': 382}], [{'start': {'row': 34,\n 'column': 8}, 'end': {'row': 34, 'column': 9}, 'action': 'remove',\n 'lines': ['c'], 'id': 383}], [{'start': {'row': 34, 'column': 8}, 'end':\n {'row': 34, 'column': 9}, 'action': 'insert', 'lines': 
['c'], 'id': 384\n }, {'start': {'row': 34, 'column': 9}, 'end': {'row': 34, 'column': 10},\n 'action': 'insert', 'lines': ['h']}, {'start': {'row': 34, 'column': 10\n }, 'end': {'row': 34, 'column': 11}, 'action': 'insert', 'lines': ['a']\n }, {'start': {'row': 34, 'column': 11}, 'end': {'row': 34, 'column': 12\n }, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 34, 'column':\n 12}, 'end': {'row': 34, 'column': 13}, 'action': 'insert', 'lines': [\n 'g']}, {'start': {'row': 34, 'column': 13}, 'end': {'row': 34, 'column':\n 14}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row': 34,\n 'column': 13}, 'end': {'row': 34, 'column': 14}, 'action': 'remove',\n 'lines': ['e'], 'id': 385}, {'start': {'row': 34, 'column': 12}, 'end':\n {'row': 34, 'column': 13}, 'action': 'remove', 'lines': ['g']}, {\n 'start': {'row': 34, 'column': 11}, 'end': {'row': 34, 'column': 12},\n 'action': 'remove', 'lines': ['n']}, {'start': {'row': 34, 'column': 10\n }, 'end': {'row': 34, 'column': 11}, 'action': 'remove', 'lines': ['a']\n }, {'start': {'row': 34, 'column': 9}, 'end': {'row': 34, 'column': 10},\n 'action': 'remove', 'lines': ['h']}, {'start': {'row': 34, 'column': 8},\n 'end': {'row': 34, 'column': 9}, 'action': 'remove', 'lines': ['c']}],\n [{'start': {'row': 34, 'column': 8}, 'end': {'row': 34, 'column': 9},\n 'action': 'insert', 'lines': ['r'], 'id': 386}, {'start': {'row': 34,\n 'column': 9}, 'end': {'row': 34, 'column': 10}, 'action': 'insert',\n 'lines': ['e']}, {'start': {'row': 34, 'column': 10}, 'end': {'row': 34,\n 'column': 11}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': \n 34, 'column': 11}, 'end': {'row': 34, 'column': 12}, 'action': 'insert',\n 'lines': ['p']}, {'start': {'row': 34, 'column': 12}, 'end': {'row': 34,\n 'column': 13}, 'action': 'insert', 'lines': ['o']}, {'start': {'row': \n 34, 'column': 13}, 'end': {'row': 34, 'column': 14}, 'action': 'insert',\n 'lines': ['n']}, {'start': {'row': 34, 'column': 14}, 'end': {'row': 34,\n 'column': 15}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': \n 34, 'column': 15}, 'end': {'row': 34, 'column': 16}, 'action': 'insert',\n 'lines': ['e']}], [{'start': {'row': 34, 'column': 15}, 'end': {'row': \n 34, 'column': 16}, 'action': 'remove', 'lines': ['e'], 'id': 387}, {\n 'start': {'row': 34, 'column': 14}, 'end': {'row': 34, 'column': 15},\n 'action': 'remove', 'lines': ['s']}, {'start': {'row': 34, 'column': 13\n }, 'end': {'row': 34, 'column': 14}, 'action': 'remove', 'lines': ['n']\n }, {'start': {'row': 34, 'column': 12}, 'end': {'row': 34, 'column': 13\n }, 'action': 'remove', 'lines': ['o']}, {'start': {'row': 34, 'column':\n 11}, 'end': {'row': 34, 'column': 12}, 'action': 'remove', 'lines': [\n 'p']}, {'start': {'row': 34, 'column': 10}, 'end': {'row': 34, 'column':\n 11}, 'action': 'remove', 'lines': ['s']}, {'start': {'row': 34,\n 'column': 9}, 'end': {'row': 34, 'column': 10}, 'action': 'remove',\n 'lines': ['e']}, {'start': {'row': 34, 'column': 8}, 'end': {'row': 34,\n 'column': 9}, 'action': 'remove', 'lines': ['r']}], [{'start': {'row': \n 34, 'column': 8}, 'end': {'row': 34, 'column': 9}, 'action': 'insert',\n 'lines': ['r'], 'id': 388}, {'start': {'row': 34, 'column': 9}, 'end':\n {'row': 34, 'column': 10}, 'action': 'insert', 'lines': ['e']}, {\n 'start': {'row': 34, 'column': 10}, 'end': {'row': 34, 'column': 11},\n 'action': 'insert', 'lines': ['s']}, {'start': {'row': 34, 'column': 11\n }, 'end': {'row': 34, 'column': 12}, 'action': 'insert', 'lines': ['p']\n }, {'start': {'row': 34, 
'column': 12}, 'end': {'row': 34, 'column': 13\n }, 'action': 'insert', 'lines': ['o']}, {'start': {'row': 34, 'column':\n 13}, 'end': {'row': 34, 'column': 14}, 'action': 'insert', 'lines': [\n 'n']}, {'start': {'row': 34, 'column': 14}, 'end': {'row': 34, 'column':\n 15}, 'action': 'insert', 'lines': ['s']}, {'start': {'row': 34,\n 'column': 15}, 'end': {'row': 34, 'column': 16}, 'action': 'insert',\n 'lines': ['e']}], [{'start': {'row': 34, 'column': 16}, 'end': {'row': \n 34, 'column': 17}, 'action': 'insert', 'lines': [' '], 'id': 389}, {\n 'start': {'row': 34, 'column': 17}, 'end': {'row': 34, 'column': 18},\n 'action': 'insert', 'lines': ['=']}], [{'start': {'row': 34, 'column': \n 18}, 'end': {'row': 34, 'column': 19}, 'action': 'insert', 'lines': [\n ' '], 'id': 390}], [{'start': {'row': 33, 'column': 10}, 'end': {'row':\n 33, 'column': 11}, 'action': 'insert', 'lines': ['r'], 'id': 391}, {\n 'start': {'row': 33, 'column': 11}, 'end': {'row': 33, 'column': 12},\n 'action': 'insert', 'lines': ['e']}, {'start': {'row': 33, 'column': 12\n }, 'end': {'row': 33, 'column': 13}, 'action': 'insert', 'lines': ['s']\n }, {'start': {'row': 33, 'column': 13}, 'end': {'row': 33, 'column': 14\n }, 'action': 'insert', 'lines': ['p']}, {'start': {'row': 33, 'column':\n 14}, 'end': {'row': 33, 'column': 15}, 'action': 'insert', 'lines': [\n 'o']}, {'start': {'row': 33, 'column': 15}, 'end': {'row': 33, 'column':\n 16}, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 33,\n 'column': 16}, 'end': {'row': 33, 'column': 17}, 'action': 'insert',\n 'lines': ['s']}, {'start': {'row': 33, 'column': 17}, 'end': {'row': 33,\n 'column': 18}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row':\n 33, 'column': 18}, 'end': {'row': 33, 'column': 19}, 'action': 'insert',\n 'lines': [' '], 'id': 392}, {'start': {'row': 33, 'column': 19}, 'end':\n {'row': 33, 'column': 20}, 'action': 'insert', 'lines': ['<']}], [{\n 'start': {'row': 33, 'column': 20}, 'end': {'row': 33, 'column': 21},\n 'action': 'insert', 'lines': [' '], 'id': 393}], [{'start': {'row': 33,\n 'column': 21}, 'end': {'row': 33, 'column': 22}, 'action': 'insert',\n 'lines': ['0'], 'id': 394}], [{'start': {'row': 33, 'column': 22},\n 'end': {'row': 33, 'column': 23}, 'action': 'insert', 'lines': [':'],\n 'id': 395}], [{'start': {'row': 33, 'column': 4}, 'end': {'row': 34,\n 'column': 45}, 'action': 'remove', 'lines': ['while response < 0:',\n ' response = get_float(\"Change owed: \")'], 'id': 396}, {'start':\n {'row': 33, 'column': 4}, 'end': {'row': 37, 'column': 12}, 'action':\n 'insert', 'lines': ['while True:', ' n = get_int(\"Height: \")',\n ' if n > 0 and n <= 8:', ' break', ' return n']}],\n [{'start': {'row': 35, 'column': 17}, 'end': {'row': 35, 'column': 28},\n 'action': 'remove', 'lines': ['and n <= 8:'], 'id': 397}, {'start': {\n 'row': 35, 'column': 16}, 'end': {'row': 35, 'column': 17}, 'action':\n 'remove', 'lines': [' ']}], [{'start': {'row': 32, 'column': 23}, 'end':\n {'row': 32, 'column': 24}, 'action': 'insert', 'lines': ['('], 'id': \n 398}, {'start': {'row': 32, 'column': 24}, 'end': {'row': 32, 'column':\n 25}, 'action': 'insert', 'lines': [')']}], [{'start': {'row': 38,\n 'column': 4}, 'end': {'row': 39, 'column': 0}, 'action': 'insert',\n 'lines': ['', ''], 'id': 399}, {'start': {'row': 39, 'column': 0},\n 'end': {'row': 39, 'column': 4}, 'action': 'insert', 'lines': [' ']},\n {'start': {'row': 39, 'column': 4}, 'end': {'row': 40, 'column': 0},\n 'action': 'insert', 'lines': ['', '']}, {'start': {'row': 40, 
'column':\n 0}, 'end': {'row': 40, 'column': 4}, 'action': 'insert', 'lines': [\n ' ']}], [{'start': {'row': 40, 'column': 0}, 'end': {'row': 40,\n 'column': 4}, 'action': 'remove', 'lines': [' '], 'id': 400}], [{\n 'start': {'row': 40, 'column': 0}, 'end': {'row': 40, 'column': 1},\n 'action': 'insert', 'lines': ['m'], 'id': 401}], [{'start': {'row': 40,\n 'column': 0}, 'end': {'row': 40, 'column': 1}, 'action': 'remove',\n 'lines': ['m'], 'id': 402}, {'start': {'row': 39, 'column': 4}, 'end':\n {'row': 40, 'column': 0}, 'action': 'remove', 'lines': ['', '']}, {\n 'start': {'row': 39, 'column': 0}, 'end': {'row': 39, 'column': 4},\n 'action': 'remove', 'lines': [' ']}], [{'start': {'row': 39,\n 'column': 0}, 'end': {'row': 39, 'column': 1}, 'action': 'insert',\n 'lines': ['m'], 'id': 403}, {'start': {'row': 39, 'column': 1}, 'end':\n {'row': 39, 'column': 2}, 'action': 'insert', 'lines': ['a']}, {'start':\n {'row': 39, 'column': 2}, 'end': {'row': 39, 'column': 3}, 'action':\n 'insert', 'lines': ['i']}, {'start': {'row': 39, 'column': 3}, 'end': {\n 'row': 39, 'column': 4}, 'action': 'insert', 'lines': ['n']}], [{\n 'start': {'row': 39, 'column': 4}, 'end': {'row': 39, 'column': 6},\n 'action': 'insert', 'lines': ['()'], 'id': 404}], [{'start': {'row': 35,\n 'column': 16}, 'end': {'row': 35, 'column': 17}, 'action': 'insert',\n 'lines': [':'], 'id': 405}], [{'start': {'row': 34, 'column': 16},\n 'end': {'row': 34, 'column': 19}, 'action': 'remove', 'lines': ['int'],\n 'id': 406}, {'start': {'row': 34, 'column': 16}, 'end': {'row': 34,\n 'column': 17}, 'action': 'insert', 'lines': ['f']}, {'start': {'row': \n 34, 'column': 17}, 'end': {'row': 34, 'column': 18}, 'action': 'insert',\n 'lines': ['l']}, {'start': {'row': 34, 'column': 18}, 'end': {'row': 34,\n 'column': 19}, 'action': 'insert', 'lines': ['o']}, {'start': {'row': \n 34, 'column': 19}, 'end': {'row': 34, 'column': 20}, 'action': 'insert',\n 'lines': ['a']}, {'start': {'row': 34, 'column': 20}, 'end': {'row': 34,\n 'column': 21}, 'action': 'insert', 'lines': ['t']}], [{'start': {'row':\n 34, 'column': 23}, 'end': {'row': 34, 'column': 29}, 'action': 'remove',\n 'lines': ['Height'], 'id': 407}, {'start': {'row': 34, 'column': 23},\n 'end': {'row': 34, 'column': 24}, 'action': 'insert', 'lines': ['C']},\n {'start': {'row': 34, 'column': 24}, 'end': {'row': 34, 'column': 25},\n 'action': 'insert', 'lines': ['h']}, {'start': {'row': 34, 'column': 25\n }, 'end': {'row': 34, 'column': 26}, 'action': 'insert', 'lines': ['a']\n }, {'start': {'row': 34, 'column': 26}, 'end': {'row': 34, 'column': 27\n }, 'action': 'insert', 'lines': ['n']}, {'start': {'row': 34, 'column':\n 27}, 'end': {'row': 34, 'column': 28}, 'action': 'insert', 'lines': [\n 'g']}, {'start': {'row': 34, 'column': 28}, 'end': {'row': 34, 'column':\n 29}, 'action': 'insert', 'lines': ['e']}], [{'start': {'row': 34,\n 'column': 29}, 'end': {'row': 34, 'column': 30}, 'action': 'insert',\n 'lines': [' '], 'id': 408}, {'start': {'row': 34, 'column': 30}, 'end':\n {'row': 34, 'column': 31}, 'action': 'insert', 'lines': ['o']}, {\n 'start': {'row': 34, 'column': 31}, 'end': {'row': 34, 'column': 32},\n 'action': 'insert', 'lines': ['w']}, {'start': {'row': 34, 'column': 32\n }, 'end': {'row': 34, 'column': 33}, 'action': 'insert', 'lines': ['e']\n }, {'start': {'row': 34, 'column': 33}, 'end': {'row': 34, 'column': 34\n }, 'action': 'insert', 'lines': ['d']}], [{'start': {'row': 35,\n 'column': 14}, 'end': {'row': 35, 'column': 15}, 'action': 'insert',\n 'lines': ['='], 'id': 
409}]]}, 'ace': {'folds': [], 'scrolltop': 439.5,\n 'scrollleft': 0, 'selection': {'start': {'row': 35, 'column': 15},\n 'end': {'row': 35, 'column': 15}, 'isBackwards': false}, 'options': {\n 'guessTabSize': true, 'useWrapMode': false, 'wrapToView': true},\n 'firstLineState': {'row': 26, 'state': 'start', 'mode':\n 'ace/mode/python'}}, 'timestamp': 1595063222568, 'hash':\n '5e254036c6adabee2cc8fd34ca538d47c56fc31e'}\n",
"step-3": "{\"filter\":false,\"title\":\"cash.py\",\"tooltip\":\"/pset6/cash/cash.py\",\"undoManager\":{\"mark\":100,\"position\":100,\"stack\":[[{\"start\":{\"row\":12,\"column\":7},\"end\":{\"row\":12,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":308},{\"start\":{\"row\":12,\"column\":8},\"end\":{\"row\":12,\"column\":9},\"action\":\"insert\",\"lines\":[\">\"]}],[{\"start\":{\"row\":12,\"column\":9},\"end\":{\"row\":12,\"column\":10},\"action\":\"insert\",\"lines\":[\" \"],\"id\":309}],[{\"start\":{\"row\":12,\"column\":9},\"end\":{\"row\":12,\"column\":10},\"action\":\"remove\",\"lines\":[\" \"],\"id\":310}],[{\"start\":{\"row\":12,\"column\":9},\"end\":{\"row\":12,\"column\":10},\"action\":\"insert\",\"lines\":[\"=\"],\"id\":311}],[{\"start\":{\"row\":12,\"column\":10},\"end\":{\"row\":12,\"column\":11},\"action\":\"insert\",\"lines\":[\" \"],\"id\":312},{\"start\":{\"row\":12,\"column\":11},\"end\":{\"row\":12,\"column\":12},\"action\":\"insert\",\"lines\":[\"q\"]},{\"start\":{\"row\":12,\"column\":12},\"end\":{\"row\":12,\"column\":13},\"action\":\"insert\",\"lines\":[\"u\"]},{\"start\":{\"row\":12,\"column\":13},\"end\":{\"row\":12,\"column\":14},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":12,\"column\":14},\"end\":{\"row\":12,\"column\":15},\"action\":\"insert\",\"lines\":[\"r\"]},{\"start\":{\"row\":12,\"column\":15},\"end\":{\"row\":12,\"column\":16},\"action\":\"insert\",\"lines\":[\"t\"]},{\"start\":{\"row\":12,\"column\":16},\"end\":{\"row\":12,\"column\":17},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":12,\"column\":17},\"end\":{\"row\":12,\"column\":18},\"action\":\"insert\",\"lines\":[\"r\"]}],[{\"start\":{\"row\":14,\"column\":0},\"end\":{\"row\":15,\"column\":13},\"action\":\"remove\",\"lines\":[\" if c < quarter:\",\" break\"],\"id\":313},{\"start\":{\"row\":13,\"column\":19},\"end\":{\"row\":14,\"column\":0},\"action\":\"remove\",\"lines\":[\"\",\"\"]}],[{\"start\":{\"row\":16,\"column\":6},\"end\":{\"row\":16,\"column\":10},\"action\":\"remove\",\"lines\":[\"True\"],\"id\":314},{\"start\":{\"row\":16,\"column\":6},\"end\":{\"row\":16,\"column\":7},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":16,\"column\":7},\"end\":{\"row\":16,\"column\":8},\"action\":\"insert\",\"lines\":[\">\"]}],[{\"start\":{\"row\":16,\"column\":8},\"end\":{\"row\":16,\"column\":9},\"action\":\"insert\",\"lines\":[\"=\"],\"id\":315}],[{\"start\":{\"row\":16,\"column\":9},\"end\":{\"row\":16,\"column\":10},\"action\":\"insert\",\"lines\":[\" \"],\"id\":316},{\"start\":{\"row\":16,\"column\":10},\"end\":{\"row\":16,\"column\":11},\"action\":\"insert\",\"lines\":[\"d\"]},{\"start\":{\"row\":16,\"column\":11},\"end\":{\"row\":16,\"column\":12},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":16,\"column\":12},\"end\":{\"row\":16,\"column\":13},\"action\":\"insert\",\"lines\":[\"m\"]},{\"start\":{\"row\":16,\"column\":13},\"end\":{\"row\":16,\"column\":14},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":16,\"column\":7},\"end\":{\"row\":16,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":317}],[{\"start\":{\"row\":18,\"column\":0},\"end\":{\"row\":19,\"column\":13},\"action\":\"remove\",\"lines\":[\" if c < dime:\",\" 
break\"],\"id\":318},{\"start\":{\"row\":17,\"column\":16},\"end\":{\"row\":18,\"column\":0},\"action\":\"remove\",\"lines\":[\"\",\"\"]}],[{\"start\":{\"row\":20,\"column\":6},\"end\":{\"row\":20,\"column\":10},\"action\":\"remove\",\"lines\":[\"True\"],\"id\":319}],[{\"start\":{\"row\":20,\"column\":6},\"end\":{\"row\":20,\"column\":7},\"action\":\"insert\",\"lines\":[\"c\"],\"id\":320}],[{\"start\":{\"row\":20,\"column\":7},\"end\":{\"row\":20,\"column\":8},\"action\":\"insert\",\"lines\":[\">\"],\"id\":321}],[{\"start\":{\"row\":20,\"column\":7},\"end\":{\"row\":20,\"column\":8},\"action\":\"remove\",\"lines\":[\">\"],\"id\":322}],[{\"start\":{\"row\":20,\"column\":7},\"end\":{\"row\":20,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":323},{\"start\":{\"row\":20,\"column\":8},\"end\":{\"row\":20,\"column\":9},\"action\":\"insert\",\"lines\":[\">\"]}],[{\"start\":{\"row\":20,\"column\":9},\"end\":{\"row\":20,\"column\":10},\"action\":\"insert\",\"lines\":[\"=\"],\"id\":324}],[{\"start\":{\"row\":20,\"column\":10},\"end\":{\"row\":20,\"column\":11},\"action\":\"insert\",\"lines\":[\" \"],\"id\":325},{\"start\":{\"row\":20,\"column\":11},\"end\":{\"row\":20,\"column\":12},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":20,\"column\":12},\"end\":{\"row\":20,\"column\":13},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":20,\"column\":13},\"end\":{\"row\":20,\"column\":14},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":20,\"column\":14},\"end\":{\"row\":20,\"column\":15},\"action\":\"insert\",\"lines\":[\"k\"]},{\"start\":{\"row\":20,\"column\":15},\"end\":{\"row\":20,\"column\":16},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":20,\"column\":16},\"end\":{\"row\":20,\"column\":17},\"action\":\"insert\",\"lines\":[\"l\"]}],[{\"start\":{\"row\":22,\"column\":1},\"end\":{\"row\":23,\"column\":13},\"action\":\"remove\",\"lines\":[\" if c < nickel:\",\" break\"],\"id\":326},{\"start\":{\"row\":22,\"column\":0},\"end\":{\"row\":22,\"column\":1},\"action\":\"remove\",\"lines\":[\" \"]},{\"start\":{\"row\":21,\"column\":18},\"end\":{\"row\":22,\"column\":0},\"action\":\"remove\",\"lines\":[\"\",\"\"]}],[{\"start\":{\"row\":24,\"column\":9},\"end\":{\"row\":24,\"column\":10},\"action\":\"remove\",\"lines\":[\"e\"],\"id\":327},{\"start\":{\"row\":24,\"column\":8},\"end\":{\"row\":24,\"column\":9},\"action\":\"remove\",\"lines\":[\"u\"]},{\"start\":{\"row\":24,\"column\":7},\"end\":{\"row\":24,\"column\":8},\"action\":\"remove\",\"lines\":[\"r\"]},{\"start\":{\"row\":24,\"column\":6},\"end\":{\"row\":24,\"column\":7},\"action\":\"remove\",\"lines\":[\"T\"]}],[{\"start\":{\"row\":24,\"column\":6},\"end\":{\"row\":24,\"column\":7},\"action\":\"insert\",\"lines\":[\"c\"],\"id\":328}],[{\"start\":{\"row\":24,\"column\":7},\"end\":{\"row\":24,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":329},{\"start\":{\"row\":24,\"column\":8},\"end\":{\"row\":24,\"column\":9},\"action\":\"insert\",\"lines\":[\">\"]},{\"start\":{\"row\":24,\"column\":9},\"end\":{\"row\":24,\"column\":10},\"action\":\"insert\",\"lines\":[\"=\"]}],[{\"start\":{\"row\":24,\"column\":10},\"end\":{\"row\":24,\"column\":11},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":330},{\"start\":{\"row\":24,\"column\":11},\"end\":{\"row\":24,\"column\":12},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":24,\"column\":12},\"end\":{\"row\":24,\"column\":13},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":24,\"column\":13},\"end\":{\"row\":24,\"column\":14},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":24,\"column\":14},\"end\":{\"row\":24,\"column\":15},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":24,\"column\":15},\"end\":{\"row\":24,\"column\":16},\"action\":\"insert\",\"lines\":[\"i\"]}],[{\"start\":{\"row\":24,\"column\":16},\"end\":{\"row\":24,\"column\":17},\"action\":\"insert\",\"lines\":[\"e\"],\"id\":331}],[{\"start\":{\"row\":26,\"column\":3},\"end\":{\"row\":27,\"column\":13},\"action\":\"remove\",\"lines\":[\" if c < 0:\",\" break\"],\"id\":332},{\"start\":{\"row\":26,\"column\":2},\"end\":{\"row\":26,\"column\":3},\"action\":\"remove\",\"lines\":[\" \"]},{\"start\":{\"row\":26,\"column\":1},\"end\":{\"row\":26,\"column\":2},\"action\":\"remove\",\"lines\":[\" \"]},{\"start\":{\"row\":26,\"column\":0},\"end\":{\"row\":26,\"column\":1},\"action\":\"remove\",\"lines\":[\" \"]},{\"start\":{\"row\":25,\"column\":18},\"end\":{\"row\":26,\"column\":0},\"action\":\"remove\",\"lines\":[\"\",\"\"]}],[{\"start\":{\"row\":1,\"column\":0},\"end\":{\"row\":2,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":333},{\"start\":{\"row\":2,\"column\":0},\"end\":{\"row\":3,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"]}],[{\"start\":{\"row\":2,\"column\":0},\"end\":{\"row\":2,\"column\":1},\"action\":\"insert\",\"lines\":[\"d\"],\"id\":334},{\"start\":{\"row\":2,\"column\":1},\"end\":{\"row\":2,\"column\":2},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":2,\"column\":2},\"end\":{\"row\":2,\"column\":3},\"action\":\"insert\",\"lines\":[\"f\"]}],[{\"start\":{\"row\":2,\"column\":3},\"end\":{\"row\":2,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":335},{\"start\":{\"row\":2,\"column\":4},\"end\":{\"row\":2,\"column\":5},\"action\":\"insert\",\"lines\":[\"m\"]},{\"start\":{\"row\":2,\"column\":5},\"end\":{\"row\":2,\"column\":6},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":2,\"column\":6},\"end\":{\"row\":2,\"column\":7},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":2,\"column\":7},\"end\":{\"row\":2,\"column\":8},\"action\":\"insert\",\"lines\":[\"n\"]}],[{\"start\":{\"row\":2,\"column\":8},\"end\":{\"row\":2,\"column\":10},\"action\":\"insert\",\"lines\":[\"()\"],\"id\":336}],[{\"start\":{\"row\":2,\"column\":10},\"end\":{\"row\":2,\"column\":11},\"action\":\"insert\",\"lines\":[\":\"],\"id\":337}],[{\"start\":{\"row\":4,\"column\":0},\"end\":{\"row\":4,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":338}],[{\"start\":{\"row\":5,\"column\":0},\"end\":{\"row\":5,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":339}],[{\"start\":{\"row\":6,\"column\":0},\"end\":{\"row\":6,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":340}],[{\"start\":{\"row\":7,\"column\":0},\"end\":{\"row\":7,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":341}],[{\"start\":{\"row\":8,\"column\":0},\"end\":{\"row\":8,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":342}],[{\"start\":{\"row\":10,\"column\":0},\"end\":{\"row\":10,\"column\":4},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":343}],[{\"start\":{\"row\":12,\"column\":0},\"end\":{\"row\":12,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":344}],[{\"start\":{\"row\":14,\"column\":0},\"end\":{\"row\":14,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":345}],[{\"start\":{\"row\":15,\"column\":4},\"end\":{\"row\":15,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":346}],[{\"start\":{\"row\":16,\"column\":4},\"end\":{\"row\":16,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":347}],[{\"start\":{\"row\":18,\"column\":0},\"end\":{\"row\":18,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":348}],[{\"start\":{\"row\":19,\"column\":4},\"end\":{\"row\":19,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":349}],[{\"start\":{\"row\":20,\"column\":4},\"end\":{\"row\":20,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":350}],[{\"start\":{\"row\":22,\"column\":0},\"end\":{\"row\":22,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":351}],[{\"start\":{\"row\":23,\"column\":4},\"end\":{\"row\":23,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":352}],[{\"start\":{\"row\":24,\"column\":4},\"end\":{\"row\":24,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":353}],[{\"start\":{\"row\":24,\"column\":8},\"end\":{\"row\":24,\"column\":12},\"action\":\"insert\",\"lines\":[\" \"],\"id\":354}],[{\"start\":{\"row\":24,\"column\":8},\"end\":{\"row\":24,\"column\":12},\"action\":\"remove\",\"lines\":[\" \"],\"id\":355}],[{\"start\":{\"row\":26,\"column\":0},\"end\":{\"row\":26,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":356}],[{\"start\":{\"row\":27,\"column\":4},\"end\":{\"row\":27,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":357}],[{\"start\":{\"row\":28,\"column\":4},\"end\":{\"row\":28,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":358}],[{\"start\":{\"row\":30,\"column\":0},\"end\":{\"row\":30,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"],\"id\":359}],[{\"start\":{\"row\":30,\"column\":25},\"end\":{\"row\":31,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":360},{\"start\":{\"row\":31,\"column\":0},\"end\":{\"row\":31,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]},{\"start\":{\"row\":31,\"column\":4},\"end\":{\"row\":32,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"]},{\"start\":{\"row\":32,\"column\":0},\"end\":{\"row\":32,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]}],[{\"start\":{\"row\":32,\"column\":0},\"end\":{\"row\":32,\"column\":4},\"action\":\"remove\",\"lines\":[\" \"],\"id\":361}],[{\"start\":{\"row\":32,\"column\":0},\"end\":{\"row\":32,\"column\":1},\"action\":\"insert\",\"lines\":[\"d\"],\"id\":362},{\"start\":{\"row\":32,\"column\":1},\"end\":{\"row\":32,\"column\":2},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":32,\"column\":2},\"end\":{\"row\":32,\"column\":3},\"action\":\"insert\",\"lines\":[\"f\"]}],[{\"start\":{\"row\":32,\"column\":3},\"end\":{\"row\":32,\"column\":4},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":363}],[{\"start\":{\"row\":32,\"column\":4},\"end\":{\"row\":32,\"column\":5},\"action\":\"insert\",\"lines\":[\"g\"],\"id\":364},{\"start\":{\"row\":32,\"column\":5},\"end\":{\"row\":32,\"column\":6},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":32,\"column\":6},\"end\":{\"row\":32,\"column\":7},\"action\":\"insert\",\"lines\":[\"t\"]},{\"start\":{\"row\":32,\"column\":7},\"end\":{\"row\":32,\"column\":8},\"action\":\"insert\",\"lines\":[\"_\"]},{\"start\":{\"row\":32,\"column\":8},\"end\":{\"row\":32,\"column\":9},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":32,\"column\":9},\"end\":{\"row\":32,\"column\":10},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":32,\"column\":10},\"end\":{\"row\":32,\"column\":11},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":32,\"column\":11},\"end\":{\"row\":32,\"column\":12},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":32,\"column\":12},\"end\":{\"row\":32,\"column\":13},\"action\":\"insert\",\"lines\":[\"t\"]},{\"start\":{\"row\":32,\"column\":13},\"end\":{\"row\":32,\"column\":14},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":32,\"column\":14},\"end\":{\"row\":32,\"column\":15},\"action\":\"insert\",\"lines\":[\"v\"]},{\"start\":{\"row\":32,\"column\":15},\"end\":{\"row\":32,\"column\":16},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":32,\"column\":16},\"end\":{\"row\":32,\"column\":17},\"action\":\"insert\",\"lines\":[\" \"],\"id\":365},{\"start\":{\"row\":32,\"column\":17},\"end\":{\"row\":32,\"column\":18},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":32,\"column\":18},\"end\":{\"row\":32,\"column\":19},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":32,\"column\":19},\"end\":{\"row\":32,\"column\":20},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":32,\"column\":20},\"end\":{\"row\":32,\"column\":21},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":32,\"column\":21},\"end\":{\"row\":32,\"column\":22},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":32,\"column\":22},\"end\":{\"row\":32,\"column\":23},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":32,\"column\":22},\"end\":{\"row\":32,\"column\":23},\"action\":\"remove\",\"lines\":[\"e\"],\"id\":366},{\"start\":{\"row\":32,\"column\":21},\"end\":{\"row\":32,\"column\":22},\"action\":\"remove\",\"lines\":[\"g\"]},{\"start\":{\"row\":32,\"column\":20},\"end\":{\"row\":32,\"column\":21},\"action\":\"remove\",\"lines\":[\"n\"]},{\"start\":{\"row\":32,\"column\":19},\"end\":{\"row\":32,\"column\":20},\"action\":\"remove\",\"lines\":[\"a\"]},{\"start\":{\"row\":32,\"column\":18},\"end\":{\"row\":32,\"column\":19},\"action\":\"remove\",\"lines\":[\"h\"]},{\"start\":{\"row\":32,\"column\":17},\"end\":{\"row\":32,\"column\":18},\"action\":\"remove\",\"lines\":[\"c\"]},{\"start\":{\"row\":32,\"column\":16},\"end\":{\"row\":32,\"column\":17},\"action\":\"remove\",\"lines\":[\" 
\"]}],[{\"start\":{\"row\":32,\"column\":16},\"end\":{\"row\":32,\"column\":17},\"action\":\"insert\",\"lines\":[\"_\"],\"id\":367},{\"start\":{\"row\":32,\"column\":17},\"end\":{\"row\":32,\"column\":18},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":32,\"column\":18},\"end\":{\"row\":32,\"column\":19},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":32,\"column\":19},\"end\":{\"row\":32,\"column\":20},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":32,\"column\":20},\"end\":{\"row\":32,\"column\":21},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":32,\"column\":21},\"end\":{\"row\":32,\"column\":22},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":32,\"column\":22},\"end\":{\"row\":32,\"column\":23},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":32,\"column\":23},\"end\":{\"row\":32,\"column\":24},\"action\":\"insert\",\"lines\":[\":\"]}],[{\"start\":{\"row\":32,\"column\":24},\"end\":{\"row\":33,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":368},{\"start\":{\"row\":33,\"column\":0},\"end\":{\"row\":33,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]}],[{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":33,\"column\":8},\"action\":\"insert\",\"lines\":[\" \"],\"id\":369}],[{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":33,\"column\":8},\"action\":\"remove\",\"lines\":[\" \"],\"id\":370}],[{\"start\":{\"row\":10,\"column\":13},\"end\":{\"row\":10,\"column\":39},\"action\":\"remove\",\"lines\":[\"get_float(\\\"Change owed: \\\")\"],\"id\":372}],[{\"start\":{\"row\":10,\"column\":13},\"end\":{\"row\":10,\"column\":14},\"action\":\"insert\",\"lines\":[\"g\"],\"id\":373},{\"start\":{\"row\":10,\"column\":14},\"end\":{\"row\":10,\"column\":15},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":10,\"column\":15},\"end\":{\"row\":10,\"column\":16},\"action\":\"insert\",\"lines\":[\"t\"]},{\"start\":{\"row\":10,\"column\":16},\"end\":{\"row\":10,\"column\":17},\"action\":\"insert\",\"lines\":[\"_\"]},{\"start\":{\"row\":10,\"column\":17},\"end\":{\"row\":10,\"column\":18},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":10,\"column\":18},\"end\":{\"row\":10,\"column\":19},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":10,\"column\":19},\"end\":{\"row\":10,\"column\":20},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":10,\"column\":20},\"end\":{\"row\":10,\"column\":21},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":10,\"column\":21},\"end\":{\"row\":10,\"column\":22},\"action\":\"insert\",\"lines\":[\"t\"]},{\"start\":{\"row\":10,\"column\":22},\"end\":{\"row\":10,\"column\":23},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":10,\"column\":23},\"end\":{\"row\":10,\"column\":24},\"action\":\"insert\",\"lines\":[\"v\"]}],[{\"start\":{\"row\":10,\"column\":24},\"end\":{\"row\":10,\"column\":25},\"action\":\"insert\",\"lines\":[\"e\"],\"id\":374}],[{\"start\":{\"row\":10,\"column\":25},\"end\":{\"row\":10,\"column\":26},\"action\":\"insert\",\"lines\":[\"_\"],\"id\":375},{\"start\":{\"row\":10,\"column\":26},\"end\":{\"row\":10,\"column\":27},\"action\":\"insert\",\"lines\":[\"c\"]},{\"start\":{\"row\":10,\"column\":27},\"end\":{\"row\":10,\"column\":28},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":10,\"column\":28},\"end\":{\"row\":10,\"column\":29},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":10,\"column\":29},\"end\":{\"row\":10,\"column\":30},\"action\":\"insert\",\"lines\":[\"n\"]
},{\"start\":{\"row\":10,\"column\":30},\"end\":{\"row\":10,\"column\":31},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":10,\"column\":31},\"end\":{\"row\":10,\"column\":32},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":10,\"column\":32},\"end\":{\"row\":10,\"column\":34},\"action\":\"insert\",\"lines\":[\"()\"],\"id\":376}],[{\"start\":{\"row\":32,\"column\":24},\"end\":{\"row\":33,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":377},{\"start\":{\"row\":33,\"column\":0},\"end\":{\"row\":33,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]}],[{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":33,\"column\":30},\"action\":\"insert\",\"lines\":[\"get_float(\\\"Change owed: \\\")\"],\"id\":378}],[{\"start\":{\"row\":32,\"column\":24},\"end\":{\"row\":33,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":379},{\"start\":{\"row\":33,\"column\":0},\"end\":{\"row\":33,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]},{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":33,\"column\":5},\"action\":\"insert\",\"lines\":[\"w\"]},{\"start\":{\"row\":33,\"column\":5},\"end\":{\"row\":33,\"column\":6},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":33,\"column\":6},\"end\":{\"row\":33,\"column\":7},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":33,\"column\":7},\"end\":{\"row\":33,\"column\":8},\"action\":\"insert\",\"lines\":[\"l\"]},{\"start\":{\"row\":33,\"column\":8},\"end\":{\"row\":33,\"column\":9},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":33,\"column\":9},\"end\":{\"row\":33,\"column\":10},\"action\":\"insert\",\"lines\":[\" \"],\"id\":380}],[{\"start\":{\"row\":34,\"column\":4},\"end\":{\"row\":34,\"column\":8},\"action\":\"insert\",\"lines\":[\" 
\"],\"id\":381}],[{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"insert\",\"lines\":[\"c\"],\"id\":382}],[{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"remove\",\"lines\":[\"c\"],\"id\":383}],[{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"insert\",\"lines\":[\"c\"],\"id\":384},{\"start\":{\"row\":34,\"column\":9},\"end\":{\"row\":34,\"column\":10},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":34,\"column\":10},\"end\":{\"row\":34,\"column\":11},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":34,\"column\":11},\"end\":{\"row\":34,\"column\":12},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":12},\"end\":{\"row\":34,\"column\":13},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":34,\"column\":13},\"end\":{\"row\":34,\"column\":14},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":34,\"column\":13},\"end\":{\"row\":34,\"column\":14},\"action\":\"remove\",\"lines\":[\"e\"],\"id\":385},{\"start\":{\"row\":34,\"column\":12},\"end\":{\"row\":34,\"column\":13},\"action\":\"remove\",\"lines\":[\"g\"]},{\"start\":{\"row\":34,\"column\":11},\"end\":{\"row\":34,\"column\":12},\"action\":\"remove\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":10},\"end\":{\"row\":34,\"column\":11},\"action\":\"remove\",\"lines\":[\"a\"]},{\"start\":{\"row\":34,\"column\":9},\"end\":{\"row\":34,\"column\":10},\"action\":\"remove\",\"lines\":[\"h\"]},{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"remove\",\"lines\":[\"c\"]}],[{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"insert\",\"lines\":[\"r\"],\"id\":386},{\"start\":{\"row\":34,\"column\":9},\"end\":{\"row\":34,\"column\":10},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":34,\"column\":10},\"end\":{\"row\":34,\"column\":11},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":11},\"end\":{\"row\":34,\"column\":12},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":34,\"column\":12},\"end\":{\"row\":34,\"column\":13},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":34,\"column\":13},\"end\":{\"row\":34,\"column\":14},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":14},\"end\":{\"row\":34,\"column\":15},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":15},\"end\":{\"row\":34,\"column\":16},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":34,\"column\":15},\"end\":{\"row\":34,\"column\":16},\"action\":\"remove\",\"lines\":[\"e\"],\"id\":387},{\"start\":{\"row\":34,\"column\":14},\"end\":{\"row\":34,\"column\":15},\"action\":\"remove\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":13},\"end\":{\"row\":34,\"column\":14},\"action\":\"remove\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":12},\"end\":{\"row\":34,\"column\":13},\"action\":\"remove\",\"lines\":[\"o\"]},{\"start\":{\"row\":34,\"column\":11},\"end\":{\"row\":34,\"column\":12},\"action\":\"remove\",\"lines\":[\"p\"]},{\"start\":{\"row\":34,\"column\":10},\"end\":{\"row\":34,\"column\":11},\"action\":\"remove\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":9},\"end\":{\"row\":34,\"column\":10},\"action\":\"remove\",\"lines\":[\"e\"]},{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":34,\"column\":9},\"action\":\"remove\",\"lines\":[\"r\"]}],[{\"start\":{\"row\":34,\"column\":8},\"end\":{\"row\":3
4,\"column\":9},\"action\":\"insert\",\"lines\":[\"r\"],\"id\":388},{\"start\":{\"row\":34,\"column\":9},\"end\":{\"row\":34,\"column\":10},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":34,\"column\":10},\"end\":{\"row\":34,\"column\":11},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":11},\"end\":{\"row\":34,\"column\":12},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":34,\"column\":12},\"end\":{\"row\":34,\"column\":13},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":34,\"column\":13},\"end\":{\"row\":34,\"column\":14},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":14},\"end\":{\"row\":34,\"column\":15},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":34,\"column\":15},\"end\":{\"row\":34,\"column\":16},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":34,\"column\":16},\"end\":{\"row\":34,\"column\":17},\"action\":\"insert\",\"lines\":[\" \"],\"id\":389},{\"start\":{\"row\":34,\"column\":17},\"end\":{\"row\":34,\"column\":18},\"action\":\"insert\",\"lines\":[\"=\"]}],[{\"start\":{\"row\":34,\"column\":18},\"end\":{\"row\":34,\"column\":19},\"action\":\"insert\",\"lines\":[\" \"],\"id\":390}],[{\"start\":{\"row\":33,\"column\":10},\"end\":{\"row\":33,\"column\":11},\"action\":\"insert\",\"lines\":[\"r\"],\"id\":391},{\"start\":{\"row\":33,\"column\":11},\"end\":{\"row\":33,\"column\":12},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":33,\"column\":12},\"end\":{\"row\":33,\"column\":13},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":33,\"column\":13},\"end\":{\"row\":33,\"column\":14},\"action\":\"insert\",\"lines\":[\"p\"]},{\"start\":{\"row\":33,\"column\":14},\"end\":{\"row\":33,\"column\":15},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":33,\"column\":15},\"end\":{\"row\":33,\"column\":16},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":33,\"column\":16},\"end\":{\"row\":33,\"column\":17},\"action\":\"insert\",\"lines\":[\"s\"]},{\"start\":{\"row\":33,\"column\":17},\"end\":{\"row\":33,\"column\":18},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"start\":{\"row\":33,\"column\":18},\"end\":{\"row\":33,\"column\":19},\"action\":\"insert\",\"lines\":[\" \"],\"id\":392},{\"start\":{\"row\":33,\"column\":19},\"end\":{\"row\":33,\"column\":20},\"action\":\"insert\",\"lines\":[\"<\"]}],[{\"start\":{\"row\":33,\"column\":20},\"end\":{\"row\":33,\"column\":21},\"action\":\"insert\",\"lines\":[\" \"],\"id\":393}],[{\"start\":{\"row\":33,\"column\":21},\"end\":{\"row\":33,\"column\":22},\"action\":\"insert\",\"lines\":[\"0\"],\"id\":394}],[{\"start\":{\"row\":33,\"column\":22},\"end\":{\"row\":33,\"column\":23},\"action\":\"insert\",\"lines\":[\":\"],\"id\":395}],[{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":34,\"column\":45},\"action\":\"remove\",\"lines\":[\"while response < 0:\",\" response = get_float(\\\"Change owed: \\\")\"],\"id\":396},{\"start\":{\"row\":33,\"column\":4},\"end\":{\"row\":37,\"column\":12},\"action\":\"insert\",\"lines\":[\"while True:\",\" n = get_int(\\\"Height: \\\")\",\" if n > 0 and n <= 8:\",\" break\",\" return n\"]}],[{\"start\":{\"row\":35,\"column\":17},\"end\":{\"row\":35,\"column\":28},\"action\":\"remove\",\"lines\":[\"and n <= 8:\"],\"id\":397},{\"start\":{\"row\":35,\"column\":16},\"end\":{\"row\":35,\"column\":17},\"action\":\"remove\",\"lines\":[\" 
\"]}],[{\"start\":{\"row\":32,\"column\":23},\"end\":{\"row\":32,\"column\":24},\"action\":\"insert\",\"lines\":[\"(\"],\"id\":398},{\"start\":{\"row\":32,\"column\":24},\"end\":{\"row\":32,\"column\":25},\"action\":\"insert\",\"lines\":[\")\"]}],[{\"start\":{\"row\":38,\"column\":4},\"end\":{\"row\":39,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"],\"id\":399},{\"start\":{\"row\":39,\"column\":0},\"end\":{\"row\":39,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]},{\"start\":{\"row\":39,\"column\":4},\"end\":{\"row\":40,\"column\":0},\"action\":\"insert\",\"lines\":[\"\",\"\"]},{\"start\":{\"row\":40,\"column\":0},\"end\":{\"row\":40,\"column\":4},\"action\":\"insert\",\"lines\":[\" \"]}],[{\"start\":{\"row\":40,\"column\":0},\"end\":{\"row\":40,\"column\":4},\"action\":\"remove\",\"lines\":[\" \"],\"id\":400}],[{\"start\":{\"row\":40,\"column\":0},\"end\":{\"row\":40,\"column\":1},\"action\":\"insert\",\"lines\":[\"m\"],\"id\":401}],[{\"start\":{\"row\":40,\"column\":0},\"end\":{\"row\":40,\"column\":1},\"action\":\"remove\",\"lines\":[\"m\"],\"id\":402},{\"start\":{\"row\":39,\"column\":4},\"end\":{\"row\":40,\"column\":0},\"action\":\"remove\",\"lines\":[\"\",\"\"]},{\"start\":{\"row\":39,\"column\":0},\"end\":{\"row\":39,\"column\":4},\"action\":\"remove\",\"lines\":[\" \"]}],[{\"start\":{\"row\":39,\"column\":0},\"end\":{\"row\":39,\"column\":1},\"action\":\"insert\",\"lines\":[\"m\"],\"id\":403},{\"start\":{\"row\":39,\"column\":1},\"end\":{\"row\":39,\"column\":2},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":39,\"column\":2},\"end\":{\"row\":39,\"column\":3},\"action\":\"insert\",\"lines\":[\"i\"]},{\"start\":{\"row\":39,\"column\":3},\"end\":{\"row\":39,\"column\":4},\"action\":\"insert\",\"lines\":[\"n\"]}],[{\"start\":{\"row\":39,\"column\":4},\"end\":{\"row\":39,\"column\":6},\"action\":\"insert\",\"lines\":[\"()\"],\"id\":404}],[{\"start\":{\"row\":35,\"column\":16},\"end\":{\"row\":35,\"column\":17},\"action\":\"insert\",\"lines\":[\":\"],\"id\":405}],[{\"start\":{\"row\":34,\"column\":16},\"end\":{\"row\":34,\"column\":19},\"action\":\"remove\",\"lines\":[\"int\"],\"id\":406},{\"start\":{\"row\":34,\"column\":16},\"end\":{\"row\":34,\"column\":17},\"action\":\"insert\",\"lines\":[\"f\"]},{\"start\":{\"row\":34,\"column\":17},\"end\":{\"row\":34,\"column\":18},\"action\":\"insert\",\"lines\":[\"l\"]},{\"start\":{\"row\":34,\"column\":18},\"end\":{\"row\":34,\"column\":19},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":34,\"column\":19},\"end\":{\"row\":34,\"column\":20},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":34,\"column\":20},\"end\":{\"row\":34,\"column\":21},\"action\":\"insert\",\"lines\":[\"t\"]}],[{\"start\":{\"row\":34,\"column\":23},\"end\":{\"row\":34,\"column\":29},\"action\":\"remove\",\"lines\":[\"Height\"],\"id\":407},{\"start\":{\"row\":34,\"column\":23},\"end\":{\"row\":34,\"column\":24},\"action\":\"insert\",\"lines\":[\"C\"]},{\"start\":{\"row\":34,\"column\":24},\"end\":{\"row\":34,\"column\":25},\"action\":\"insert\",\"lines\":[\"h\"]},{\"start\":{\"row\":34,\"column\":25},\"end\":{\"row\":34,\"column\":26},\"action\":\"insert\",\"lines\":[\"a\"]},{\"start\":{\"row\":34,\"column\":26},\"end\":{\"row\":34,\"column\":27},\"action\":\"insert\",\"lines\":[\"n\"]},{\"start\":{\"row\":34,\"column\":27},\"end\":{\"row\":34,\"column\":28},\"action\":\"insert\",\"lines\":[\"g\"]},{\"start\":{\"row\":34,\"column\":28},\"end\":{\"row\":34,\"column\":29},\"action\":\"insert\",\"lines\":[\"e\"]}],[{\"sta
rt\":{\"row\":34,\"column\":29},\"end\":{\"row\":34,\"column\":30},\"action\":\"insert\",\"lines\":[\" \"],\"id\":408},{\"start\":{\"row\":34,\"column\":30},\"end\":{\"row\":34,\"column\":31},\"action\":\"insert\",\"lines\":[\"o\"]},{\"start\":{\"row\":34,\"column\":31},\"end\":{\"row\":34,\"column\":32},\"action\":\"insert\",\"lines\":[\"w\"]},{\"start\":{\"row\":34,\"column\":32},\"end\":{\"row\":34,\"column\":33},\"action\":\"insert\",\"lines\":[\"e\"]},{\"start\":{\"row\":34,\"column\":33},\"end\":{\"row\":34,\"column\":34},\"action\":\"insert\",\"lines\":[\"d\"]}],[{\"start\":{\"row\":35,\"column\":14},\"end\":{\"row\":35,\"column\":15},\"action\":\"insert\",\"lines\":[\"=\"],\"id\":409}]]},\"ace\":{\"folds\":[],\"scrolltop\":439.5,\"scrollleft\":0,\"selection\":{\"start\":{\"row\":35,\"column\":15},\"end\":{\"row\":35,\"column\":15},\"isBackwards\":false},\"options\":{\"guessTabSize\":true,\"useWrapMode\":false,\"wrapToView\":true},\"firstLineState\":{\"row\":26,\"state\":\"start\",\"mode\":\"ace/mode/python\"}},\"timestamp\":1595063222568,\"hash\":\"5e254036c6adabee2cc8fd34ca538d47c56fc31e\"}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
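The long JSON blob closing the record above is Ace editor undo history: it replays, keystroke by keystroke, a CS50-style exercise being edited, and the actions converge on a re-prompting input loop around cs50's get_float("Change owed: "). A best-effort sketch of the pattern those edits build (the function name and the non-negative condition are reconstructed from the recorded keystrokes, not taken verbatim from any file):

from cs50 import get_float

def get_positive_change():
    # Re-prompt until the user enters a non-negative amount.
    while True:
        response = get_float("Change owed: ")
        if response >= 0:
            return response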
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', admin.site.urls),
path('upload/', include('links.urls')),
]
|
normal
|
{
"blob_id": "45e8bdacad4ed293f7267d96abc9cbe8c8e192ae",
"index": 4148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', admin.site.urls), path('upload/', include(\n 'links.urls'))]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import include, path\nurlpatterns = [path('', admin.site.urls), path('upload/', include(\n 'links.urls'))]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path('', admin.site.urls),\n path('upload/', include('links.urls')),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
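The URLconf above hands everything under upload/ to the links app via include(). A minimal sketch of the links/urls.py it expects to find (the view name is a placeholder; include() only requires that the module define an urlpatterns list):

from django.urls import path
from . import views

urlpatterns = [
    # Hypothetical view name, for illustration only.
    path('', views.index, name='links-index'),
]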
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
bracketsToRemove = set()
stack = []
for i, c in enumerate(s):
if c not in '()':
continue
if c == '(':
stack.append(i)
elif not stack:
bracketsToRemove.add(i)
else:
stack.pop()
bracketsToRemove = bracketsToRemove.union(set(stack))
stringBuilder = []
for i,c in enumerate(s):
if i not in bracketsToRemove:
stringBuilder.append(c)
return "".join(stringBuilder)
Solution().minRemoveToMakeValid('L(ee)(t(()coe')
|
normal
|
{
"blob_id": "1bab6b039462bb5762aa588d5ba7c3e74362d0a7",
"index": 823,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def minRemoveToMakeValid(self, s: str) ->str:\n bracketsToRemove = set()\n stack = []\n for i, c in enumerate(s):\n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i, c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n return ''.join(stringBuilder)\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def minRemoveToMakeValid(self, s: str) ->str:\n bracketsToRemove = set()\n stack = []\n for i, c in enumerate(s):\n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i, c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n return ''.join(stringBuilder)\n\n\nSolution().minRemoveToMakeValid('L(ee)(t(()coe')\n",
"step-5": "class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n bracketsToRemove = set()\n stack = []\n \n for i, c in enumerate(s):\n \n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n \n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i,c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n \n return \"\".join(stringBuilder)\n\n\nSolution().minRemoveToMakeValid('L(ee)(t(()coe')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
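A quick sanity check of the stack-based bracket removal above, reusing the Solution class from the record; the first two expected strings follow LeetCode 1249's examples, and the last call is the one the record itself makes:

s = Solution()
assert s.minRemoveToMakeValid('lee(t(c)o)de)') == 'lee(t(c)o)de'
assert s.minRemoveToMakeValid('))((') == ''
print(s.minRemoveToMakeValid('L(ee)(t(()coe'))  # this implementation yields 'L(ee)t()coe'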
import ftplib
def ftp_download():
file_remote = 'ftp_upload.jpg'
file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg'
bufsize = 1024
fp = open(file_local, 'wb')
f.retrbinary('RETR ' + file_remote, fp.write, bufsize)
fp.close()
def ftp_upload():
file_remote = 'test_upload.jpg'
file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_upload.jpg'
bufsize = 1024
fp = open(file_local, 'rb')
f.storbinary('STOR ' + file_remote, fp, bufsize)
fp.close()
if __name__ == '__main__':
host = 'www.aiforu.com'
username = 'admin_camera'
password = 'QcZ8M9aDga'
f = ftplib.FTP(host)
f.login(username, password)
pwd_path = f.pwd()
print("FTP当前路径:", pwd_path)
ftp_download()
ftp_upload()
f.quit()
|
normal
|
{
"blob_id": "a1b85d140c45f082ceac54ad8aa9aa5c3659d5cf",
"index": 9661,
"step-1": "<mask token>\n\n\ndef ftp_download():\n file_remote = 'ftp_upload.jpg'\n file_local = (\n '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg')\n bufsize = 1024\n fp = open(file_local, 'wb')\n f.retrbinary('RETR ' + file_remote, fp.write, bufsize)\n fp.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ftp_download():\n file_remote = 'ftp_upload.jpg'\n file_local = (\n '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg')\n bufsize = 1024\n fp = open(file_local, 'wb')\n f.retrbinary('RETR ' + file_remote, fp.write, bufsize)\n fp.close()\n\n\ndef ftp_upload():\n file_remote = 'test_upload.jpg'\n file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_upload.jpg'\n bufsize = 1024\n fp = open(file_local, 'rb')\n f.storbinary('STOR ' + file_remote, fp, bufsize)\n fp.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ftp_download():\n file_remote = 'ftp_upload.jpg'\n file_local = (\n '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg')\n bufsize = 1024\n fp = open(file_local, 'wb')\n f.retrbinary('RETR ' + file_remote, fp.write, bufsize)\n fp.close()\n\n\ndef ftp_upload():\n file_remote = 'test_upload.jpg'\n file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_upload.jpg'\n bufsize = 1024\n fp = open(file_local, 'rb')\n f.storbinary('STOR ' + file_remote, fp, bufsize)\n fp.close()\n\n\nif __name__ == '__main__':\n host = 'www.aiforu.com'\n username = 'admin_camera'\n password = 'QcZ8M9aDga'\n f = ftplib.FTP(host)\n f.login(username, password)\n pwd_path = f.pwd()\n print('FTP当前路径:', pwd_path)\nftp_download()\nftp_upload()\nf.quit()\n",
"step-4": "import ftplib\n\n\ndef ftp_download():\n file_remote = 'ftp_upload.jpg'\n file_local = (\n '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg')\n bufsize = 1024\n fp = open(file_local, 'wb')\n f.retrbinary('RETR ' + file_remote, fp.write, bufsize)\n fp.close()\n\n\ndef ftp_upload():\n file_remote = 'test_upload.jpg'\n file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_upload.jpg'\n bufsize = 1024\n fp = open(file_local, 'rb')\n f.storbinary('STOR ' + file_remote, fp, bufsize)\n fp.close()\n\n\nif __name__ == '__main__':\n host = 'www.aiforu.com'\n username = 'admin_camera'\n password = 'QcZ8M9aDga'\n f = ftplib.FTP(host)\n f.login(username, password)\n pwd_path = f.pwd()\n print('FTP当前路径:', pwd_path)\nftp_download()\nftp_upload()\nf.quit()\n",
"step-5": "\nimport ftplib\n\ndef ftp_download():\n file_remote = 'ftp_upload.jpg'\n file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_download.jpg'\n bufsize = 1024\n fp = open(file_local, 'wb')\n f.retrbinary('RETR ' + file_remote, fp.write, bufsize)\n fp.close()\n \ndef ftp_upload():\n file_remote = 'test_upload.jpg'\n file_local = '/home/pi/Desktop/Camera-Rasp-Arduino-sensors/test_upload.jpg'\n bufsize = 1024\n fp = open(file_local, 'rb')\n f.storbinary('STOR ' + file_remote, fp, bufsize)\n fp.close()\n\n \nif __name__ == '__main__':\n host = 'www.aiforu.com'\n username = 'admin_camera'\n password = 'QcZ8M9aDga'\n \n \n f = ftplib.FTP(host)\n f.login(username, password)\n \n \n pwd_path = f.pwd()\n print(\"FTP当前路径:\", pwd_path)\n \n \n \nftp_download()\nftp_upload()\nf.quit()\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
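Both transfer functions above reach for a module-global f created under the __main__ guard. A sketch of the same calls with the connection passed explicitly, which also closes the file handles via context managers (names and defaults below are illustrative, not taken from the record):

import ftplib

def download(ftp, remote, local, bufsize=1024):
    # RETR streams the remote file into the local handle in binary mode.
    with open(local, 'wb') as fp:
        ftp.retrbinary('RETR ' + remote, fp.write, bufsize)

def upload(ftp, remote, local, bufsize=1024):
    # STOR streams the local file up to the server in binary mode.
    with open(local, 'rb') as fp:
        ftp.storbinary('STOR ' + remote, fp, bufsize)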
a = input().split(' ')
A = int(a[0])
B = int(a[1])
X = int(a[2])
if A <= X and A + B >= X:
print('YES')
else:
print('NO')
|
normal
|
{
"blob_id": "9a60449aa13bc5e7e413d0e47a1972d93ccfe69f",
"index": 7194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-3": "a = input().split(' ')\nA = int(a[0])\nB = int(a[1])\nX = int(a[2])\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
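The same containment test reads more directly with Python's chained comparison; parsing with map() is an equivalent idiom for the split-and-index sequence above:

A, B, X = map(int, input().split())
print('YES' if A <= X <= A + B else 'NO')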
# Generated by Django 2.2.10 on 2020-03-13 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0005_location'),
]
operations = [
migrations.AddField(
model_name='setting',
name='runned_locations_initial_data',
field=models.BooleanField(blank=True, default=False),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(max_length=128, unique=True),
),
]
|
normal
|
{
"blob_id": "211ef4c64e42c54423ac8dab2128952874a2cf5a",
"index": 7694,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n",
"step-5": "# Generated by Django 2.2.10 on 2020-03-13 14:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('system', '0005_location'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='setting',\n name='runned_locations_initial_data',\n field=models.BooleanField(blank=True, default=False),\n ),\n migrations.AlterField(\n model_name='location',\n name='name',\n field=models.CharField(max_length=128, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
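A migration like the one above is normally generated by makemigrations rather than written by hand. A sketch of the model state it implies (the two field definitions are copied from the operations; everything else about the models is an assumption):

from django.db import models

class Setting(models.Model):
    # Field added by the AddField operation above.
    runned_locations_initial_data = models.BooleanField(blank=True, default=False)

class Location(models.Model):
    # Field shape after the AlterField operation above.
    name = models.CharField(max_length=128, unique=True)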
class Line:
def __init__(self,coor1,coor2):
self.coor1 = coor1
self.coor2 = coor2
def slope(self):
pass
def distance(self):
#x = self.coor1[0]-self.coor2[0]
#y = self.coor2[1]-self.coor2[1]
#return ((x**2)+(y**2))**0.5
return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)
def slope(self):
return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])
def __str__(self):
return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
line1 = Line((3,4),(5,6))
li = Line((3,2),(8,10))
print(li.distance())
print(line1.coor1[0])
print(line1.distance())
print(li)
class Cylinder:
pi = 3.14
def __init__(self,height=1,radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi*self.radius**2*self.height
def surface_area(self):
return 2*self.pi*self.radius**2
def __str__(self):
return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
c = Cylinder(2,3)
print(c)
class Account:
def __init__(self,name,balance):
self.name=name
self.balance=balance
def deposit(self,money):
self.balance += money
return 'Deposit accepted'
def withdraw(self,moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
acct1 = Account('jose',100)
print(acct1)
print(acct1.withdraw(1000))
print(acct1.balance)
print(acct1.deposit(101))
print(acct1.balance)
|
normal
|
{
"blob_id": "f91e997b305348485698d180b97138b040285b60",
"index": 9440,
"step-1": "<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-2": "class Line:\n <mask token>\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-3": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-4": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n\n def slope(self):\n pass\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\nline1 = Line((3, 4), (5, 6))\nli = Line((3, 2), (8, 10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\nc = Cylinder(2, 3)\nprint(c)\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\nacct1 = Account('jose', 100)\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-5": "class Line:\n def __init__(self,coor1,coor2):\n self.coor1 = coor1\n self.coor2 = coor2 \n \n def slope(self):\n pass\n\n def distance(self):\n #x = self.coor1[0]-self.coor2[0]\n #y = self.coor2[1]-self.coor2[1]\n #return ((x**2)+(y**2))**0.5\n return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)\n\n def slope(self):\n return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])\n\n\n\n def __str__(self):\n return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n\n\nline1 = Line((3,4),(5,6))\nli = Line((3,2),(8,10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\nclass Cylinder:\n\n pi = 3.14\n \n def __init__(self,height=1,radius=1):\n self.height = height\n self.radius = radius\n\n\n def volume(self):\n return self.pi*self.radius**2*self.height\n\n def surface_area(self):\n return 2*self.pi*self.radius**2\n\n def __str__(self):\n return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n\nc = Cylinder(2,3)\n\nprint(c)\n\nclass Account: \n def __init__(self,name,balance):\n self.name=name\n self.balance=balance\n\n def deposit(self,money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self,moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable' \n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n \n \n\n \n\nacct1 = Account('jose',100)\n\n\n\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-ids": [
5,
15,
16,
19,
20
]
}
|
[
5,
15,
16,
19,
20
] |
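Two things worth flagging in the record above: Line defines slope() twice, so the second definition silently shadows the earlier placeholder (pass) version, and Cylinder.surface_area returns only the two end caps, 2*pi*r**2, omitting the lateral term 2*pi*r*h of a closed cylinder. A deduplicated sketch of Line using math.hypot:

import math

class Line:
    def __init__(self, coor1, coor2):
        self.coor1 = coor1
        self.coor2 = coor2

    def distance(self):
        # Euclidean distance between the two endpoints.
        (x1, y1), (x2, y2) = self.coor1, self.coor2
        return math.hypot(x2 - x1, y2 - y1)

    def slope(self):
        # Rise over run; raises ZeroDivisionError for vertical lines.
        (x1, y1), (x2, y2) = self.coor1, self.coor2
        return (y2 - y1) / (x2 - x1)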
import os
error_msg = '''The default transformer cannot handle slashes (subdirectories);
try another transformer in vlermv.transformers.'''
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return (key,)
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
|
normal
|
{
"blob_id": "e4ff6d689a7da5b16786fd59d6a4707b9b6e3e7d",
"index": 8076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-3": "<mask token>\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-4": "import os\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-5": "import os\n\nerror_msg = '''The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.'''\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n\n return (key,)\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
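A round-trip check for the transformer above; rejecting path separators is exactly what error_msg promises:

assert to_path('spam') == ('spam',)
assert to_path(('spam',)) == ('spam',)
assert from_path(('spam',)) == 'spam'
try:
    to_path('a/b')
except ValueError:
    pass  # slashes are rejected, as the error message says
else:
    raise AssertionError('expected ValueError for a key containing a slash')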
import sys
from domain import *
from fuzzy_set import *
from parser import *
class FuzzyControler(object):
def __init__(self, angle_rules, acc_rules, domains_angle, domains_acc):
self.angle_rules = angle_rules
self.acc_rules = acc_rules
self.domains_angle = domains_angle
self.domains_acc = domains_acc
self.intervals = []
self.intervals.append(0)
i = 1
while i <= 2048:
self.intervals.append(i)
i *= 2
def calculateNewAccAndAngle(self, L, D, LK, DK, V, S):
left = (L, LK, S)
right = (D, DK, S)
left = self.transformToInterval(left)
right = self.transformToInterval(right)
left_acc = (left[0], right[0], V)
right_acc = (left[1], right[1], V)
angle = self.calcAngle(left, right, self.angle_rules, self.domains_angle)
acc = self.calcAcc(left_acc, right_acc, self.acc_rules, self.domains_acc)
sys.stderr.write(str(angle) + " " + str(acc) + "\n")
return acc, angle
def calcAngle(self, left, right, angle_rules, domains):
# print domains
angle_domain = domains["angle_domain"]
cardinality = angle_domain.getCardinality()
new_memberships = []
domain_elements = angle_domain.getElements()
# print "here"
for i in range(cardinality):
#
y = domain_elements[i]
left_elem = left + (y,)
right_elem = right + (y,)
# sys.stderr.write(str(left_elem) + "\n")
# sys.stderr.write(str(right_elem) + "\n")
# print right_elem
max_ = 0
for rule in angle_rules:
# print "here"
# print rule.name
if rule.name.startswith("RULE_LEFT"):
# print "here"
min_ = rule.getMembershipFor(left_elem)
else:
min_ = rule.getMembershipFor(right_elem)
# print min_
if min_ > max_:
max_ = min_
new_memberships.append(max_)
# print self.centerOfArea(new_memberships, domain_elements)
result = int(self.centerOfArea(new_memberships, domain_elements))
return result
def calcAcc(self, left, right, acc_rules, domains):
# print domains
acc_domain = domains["acc_domain"]
cardinality = acc_domain.getCardinality()
new_memberships = []
domain_elements = acc_domain.getElements()
for i in range(cardinality):
#
y = domain_elements[i]
left_elem = left + (y,)
right_elem = right + (y,)
max_ = 0
for rule in acc_rules:
# print "here"
# print rule.name
if rule.name.startswith("RULE_LD"):
# print "here"
min_ = rule.getMembershipFor(left_elem)
else:
sys.stderr.write(str(right_elem) + "\n")
min_ = rule.getMembershipFor(right_elem)
# print min_
if min_ > max_:
max_ = min_
new_memberships.append(max_)
# print self.centerOfArea(new_memberships, domain_elements)
result = int(self.centerOfArea(new_memberships, domain_elements))
return result
def centerOfArea(self, memberships, elements):
# print memberships, elements
# print len(memberships)
result = 0
numerator = 0
denominator = 0
for i in range(len(memberships)):
numerator += memberships[i] * elements[i]
denominator += memberships[i]
if denominator == 0:
return 0
result = float(numerator) / denominator
return result
def transformToInterval(self, elem):
val = list(elem)
for i in range(len(elem)):
for j in range(1, len(self.intervals)):
if elem[i] < self.intervals[j] and elem[i] >= self.intervals[j-1]:
val[i] = self.intervals[j-1]
return tuple(val)
def main():
domains_angle = {}
domains_acc = {}
sets_angle = {}
sets_acc = {}
operators = {}
operators["+"] = ("ZadehS",)
operators["*"] = ("ZadehT",)
operators["!"] = ("ZadehNot",)
operators["->"] = ("'max-min'",)
parser = Parser(sys.argv[1], domains_angle, sets_angle, operators)
parser.parse()
sets_angle = parser.rules
parser = Parser(sys.argv[2], domains_acc, sets_acc, operators)
parser.parse()
sets_acc = parser.rules
controler = FuzzyControler(sets_angle, sets_acc, domains_angle, domains_acc)
while True:
# print "here"
line = sys.stdin.readline()
if line == "KRAJ\n":
break
L,D,LK,DK,V,S = [int(s) for s in line.split() if s.isdigit()]
akcel, kormilo = controler.calculateNewAccAndAngle(L, D, LK, DK, V, S)
		print(akcel, kormilo)
sys.stdout.flush()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "7451b09c54734fb02167d43b96df972420d86853",
"index": 7776,
"step-1": "import sys\n\nfrom domain import *\nfrom fuzzy_set import *\nfrom parser import *\n\nclass FuzzyControler(object):\n\n\tdef __init__(self, angle_rules, acc_rules, domains_angle, domains_acc):\n\n\t\tself.angle_rules = angle_rules\n\t\tself.acc_rules = acc_rules\n\t\tself.domains_angle = domains_angle\n\t\tself.domains_acc = domains_acc\n\n\t\tself.intervals = []\n\t\tself.intervals.append(0)\n\n\t\ti = 1\n\t\twhile i <= 2048:\n\t\t\tself.intervals.append(i)\n\t\t\ti *= 2\n\n\tdef calculateNewAccAndAngle(self, L, D, LK, DK, V, S):\n\n\t\tleft = (L, LK, S)\n\t\tright = (D, DK, S)\n\n\t\tleft = self.transformToInterval(left)\n\t\tright = self.transformToInterval(right)\n\n\t\tleft_acc = (left[0], right[0], V)\n\t\tright_acc = (left[1], right[1], V)\n\n\t\tangle = self.calcAngle(left, right, self.angle_rules, self.domains_angle)\n\t\tacc = self.calcAcc(left_acc, right_acc, self.acc_rules, self.domains_acc)\n\t\tsys.stderr.write(str(angle) + \" \" + str(acc) + \"\\n\")\n\t\treturn acc, angle\n\n\tdef calcAngle(self, left, right, angle_rules, domains):\n\n\t\t# print domains\n\t\tangle_domain = domains[\"angle_domain\"]\n\t\tcardinality = angle_domain.getCardinality()\n\t\tnew_memberships = []\n\t\tdomain_elements = angle_domain.getElements()\n\n\t\t# print \"here\"\n\n\t\tfor i in range(cardinality):\n# \n\t\t\ty = domain_elements[i]\n\n\t\t\tleft_elem = left + (y,)\n\t\t\tright_elem = right + (y,)\n\n\t\t\t# sys.stderr.write(str(left_elem) + \"\\n\")\n\t\t\t# sys.stderr.write(str(right_elem) + \"\\n\")\n\t\t\t# print right_elem\n\n\t\t\tmax_ = 0\n\n\t\t\tfor rule in angle_rules:\n\t\t\t\t# print \"here\"\n\t\t\t\t# print rule.name\n\t\t\t\tif rule.name.startswith(\"RULE_LEFT\"):\n\t\t\t\t\t# print \"here\"\n\t\t\t\t\tmin_ = rule.getMembershipFor(left_elem)\n\t\t\t\telse:\n\t\t\t\t\tmin_ = rule.getMembershipFor(right_elem)\n\n\t\t\t\t# print min_\n\t\t\t\tif min_ > max_:\n\t\t\t\t\tmax_ = min_\n\n\t\t\tnew_memberships.append(max_)\n\n\t\t# print self.centerOfArea(new_memberships, domain_elements)\n\t\tresult = int(self.centerOfArea(new_memberships, domain_elements))\n\t\treturn result\n\n\tdef calcAcc(self, left, right, acc_rules, domains):\n\n\t\t# print domains\n\t\tacc_domain = domains[\"acc_domain\"]\n\t\tcardinality = acc_domain.getCardinality()\n\t\tnew_memberships = []\n\t\tdomain_elements = acc_domain.getElements()\n\n\t\tfor i in range(cardinality):\n# \n\t\t\ty = domain_elements[i]\n\n\t\t\tleft_elem = left + (y,)\n\t\t\tright_elem = right + (y,)\n\n\t\t\tmax_ = 0\n\n\t\t\tfor rule in acc_rules:\n\t\t\t\t# print \"here\"\n\t\t\t\t# print rule.name\n\t\t\t\tif rule.name.startswith(\"RULE_LD\"):\n\t\t\t\t\t# print \"here\"\n\t\t\t\t\tmin_ = rule.getMembershipFor(left_elem)\n\t\t\t\telse:\n\t\t\t\t\tsys.stderr.write(str(right_elem) + \"\\n\")\n\t\t\t\t\tmin_ = rule.getMembershipFor(right_elem)\n\n\t\t\t\t# print min_\n\t\t\t\tif min_ > max_:\n\t\t\t\t\tmax_ = min_\n\n\t\t\tnew_memberships.append(max_)\n\n\t\t# print self.centerOfArea(new_memberships, domain_elements)\n\t\tresult = int(self.centerOfArea(new_memberships, domain_elements))\n\t\treturn result\n\n\tdef centerOfArea(self, memberships, elements):\n\n\t\t# print memberships, elements\n\t\t# print len(memberships)\n\t\tresult = 0\n\t\tnumerator = 0\n\t\tdenominator = 0\n\n\t\tfor i in range(len(memberships)):\n\n\t\t\tnumerator += memberships[i] * elements[i]\n\t\t\tdenominator += memberships[i]\n\n\t\tif denominator == 0:\n\t\t\treturn 0\n\t\tresult = float(numerator) / denominator\n\t\treturn result\n\n\tdef 
transformToInterval(self, elem):\n\n\t\tval = list(elem)\n\t\tfor i in range(len(elem)):\n\t\t\tfor j in range(1, len(self.intervals)):\n\n\t\t\t\tif elem[i] < self.intervals[j] and elem[i] >= self.intervals[j-1]:\n\t\t\t\t\tval[i] = self.intervals[j-1]\n\n\t\treturn tuple(val)\n\ndef main():\n\n\tdomains_angle = {}\n\tdomains_acc = {}\n\tsets_angle = {}\n\tsets_acc = {}\n\toperators = {}\n\toperators[\"+\"] = (\"ZadehS\",)\n\toperators[\"*\"] = (\"ZadehT\",)\n\toperators[\"!\"] = (\"ZadehNot\",)\n\toperators[\"->\"] = (\"'max-min'\",)\n\n\tparser = Parser(sys.argv[1], domains_angle, sets_angle, operators)\n\tparser.parse()\n\tsets_angle = parser.rules\n\n\tparser = Parser(sys.argv[2], domains_acc, sets_acc, operators)\n\tparser.parse()\n\tsets_acc = parser.rules\n\n\tcontroler = FuzzyControler(sets_angle, sets_acc, domains_angle, domains_acc)\n\n\twhile True:\n\t \t\n\t \t# print \"here\"\n\t\tline = sys.stdin.readline()\n\n\t\tif line == \"KRAJ\\n\":\n\t \t\tbreak\n\n\t\tL,D,LK,DK,V,S = [int(s) for s in line.split() if s.isdigit()]\n\t\takcel, kormilo = controler.calculateNewAccAndAngle(L, D, LK, DK, V, S)\n\n\t\tprint akcel, kormilo\n\t\tsys.stdout.flush()\n\nif __name__ == \"__main__\":\n\tmain()\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
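The defuzzification step above is a plain center of area (a centroid over a discrete domain). A standalone sketch of the same computation with a small worked check:

def center_of_area(memberships, elements):
    # Weighted mean of domain elements, weighted by membership degree;
    # returns 0 for an all-zero membership vector, as the controller does.
    denominator = sum(memberships)
    if denominator == 0:
        return 0
    return sum(m * e for m, e in zip(memberships, elements)) / denominator

assert center_of_area([0.0, 1.0, 1.0, 0.0], [0, 10, 20, 30]) == 15.0
assert center_of_area([0, 0], [1, 2]) == 0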
__author__ = 'Administrator'
class People:
def __init__(self,name,age):
self.name = name
self.age = age
def eat(self):
pass
print("%s is eating..." % self.name)
def sleep(self):
print("%s is sleeping..." % self.name)
def talk(self):
print("%s is talking..." % self.name)
class Man(People):
def __init__(self,name,age,money):
# People.__init__(self,name,age)
super(Man,self).__init__(name,age)
self.money = money
print("%s 一出生就有%s money..." % (name,money))
def piao(self):
print("%s is piaoing...20s...isdone" % self.name)
def sleep(self):
#People.sleep(self)
print("man is sleeping")
class Women(People):
pass
def get_birth(self):
print("%s is born a baby...." % self.name)
m1 = Man("chenronghua",22,10000)
m1.eat()
m1.sleep()
m1.talk()
m1.piao()
w1 = Women("ronghua",26)
w1.get_birth()
|
normal
|
{
"blob_id": "6fdc9b2091652b05d6c1207d2f78b75c880fadda",
"index": 9084,
"step-1": "<mask token>\n\n\nclass People:\n <mask token>\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n <mask token>\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\nm1.eat()\nm1.sleep()\nm1.talk()\nm1.piao()\n<mask token>\nw1.get_birth()\n",
"step-5": "__author__ = 'Administrator'\n\n\nclass People:\n def __init__(self,name,age):\n self.name = name\n self.age = age\n def eat(self):\n pass\n print(\"%s is eating...\" % self.name)\n\n def sleep(self):\n print(\"%s is sleeping...\" % self.name)\n\n def talk(self):\n print(\"%s is talking...\" % self.name)\n\nclass Man(People):\n def __init__(self,name,age,money):\n # People.__init__(self,name,age)\n super(Man,self).__init__(name,age)\n\n self.money = money\n print(\"%s 一出生就有%s money...\" % (name,money))\n def piao(self):\n print(\"%s is piaoing...20s...isdone\" % self.name)\n\n def sleep(self):\n #People.sleep(self)\n print(\"man is sleeping\")\n\nclass Women(People):\n pass\n def get_birth(self):\n print(\"%s is born a baby....\" % self.name)\n\n\n\nm1 = Man(\"chenronghua\",22,10000)\n\nm1.eat()\n\nm1.sleep()\n\nm1.talk()\n\nm1.piao()\n\n\nw1 = Women(\"ronghua\",26)\n\nw1.get_birth()\n\n\n\n\n\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
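Man.__init__ above uses the explicit Python 2 style super(Man, self).__init__(...) next to a commented-out direct People.__init__ call. A minimal sketch of the same cooperative initialisation in the bare Python 3 form:

class Base:
    def __init__(self, name):
        self.name = name

class Child(Base):
    def __init__(self, name, extra):
        # Zero-argument super() resolves Base along Child's MRO.
        super().__init__(name)
        self.extra = extra

c = Child('x', 1)
assert (c.name, c.extra) == ('x', 1)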
import math_series.series as func
""" Testing for fibonacci function """
def test_fibonacci_zero():
actual = func.fibonacci(0)
expected = 0
assert actual == expected
def test_fibonacci_one():
actual = func.fibonacci(1)
expected = 1
assert actual == expected
def test_fibonacci_negative():
actual = func.fibonacci(-5)
expected = "Negative values are not allowable"
assert actual == expected
def test_fibonacci_else():
actual = func.fibonacci(6)
expected = 8
assert actual == expected
""" Testing for lucas function """
def test_lucas_zero():
actual = func.lucas(0)
expected = 2
assert actual == expected
def test_lucas_one():
actual = func.lucas(1)
expected = 1
assert actual == expected
def test_lucas_negative():
actual = func.lucas(-5)
expected = "Negative values are not allowable"
assert actual == expected
def test_lucas_else():
actual = func.lucas(6)
expected = 18
assert actual == expected
""" Testing for non_fibonacci_lucas function """
def test_non_fibonacci_lucas_zero():
actual = func.non_fibonacci_lucas(0,2,4)
expected = 2
assert actual == expected
def test_non_fibonacci_lucas_one():
actual = func.non_fibonacci_lucas(1,2,4)
expected = 4
assert actual == expected
def test_non_fibonacci_lucas_negative():
actual = func.non_fibonacci_lucas(-5,2,4)
expected = "Negative values are not allowable"
assert actual == expected
def test_non_fibonacci_lucas_else():
actual = func.non_fibonacci_lucas(3,2,4)
expected = 10
assert actual == expected
|
normal
|
{
"blob_id": "49722f640eec02029865fd702e13e485eda6391b",
"index": 8126,
"step-1": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n",
"step-2": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n",
"step-3": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n",
"step-4": "import math_series.series as func\n<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_lucas_else():\n actual = func.lucas(6)\n expected = 18\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n",
"step-5": "import math_series.series as func\n\n\"\"\" Testing for fibonacci function \"\"\"\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\"\"\" Testing for lucas function \"\"\"\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_lucas_else():\n actual = func.lucas(6)\n expected = 18\n assert actual == expected\n\n\n\"\"\" Testing for non_fibonacci_lucas function \"\"\"\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0,2,4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1,2,4)\n expected = 4\n assert actual == expected\n\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5,2,4)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3,2,4)\n expected = 10\n assert actual == expected",
"step-ids": [
8,
9,
11,
13,
14
]
}
|
[
8,
9,
11,
13,
14
] |
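The tests above pin down the contract for math_series.series. A sketch of implementations that would satisfy every assertion (the string return for negative input mirrors the expected values; the module layout itself is an assumption):

def fibonacci(n):
    # 0, 1, 1, 2, 3, 5, 8, ...
    if n < 0:
        return 'Negative values are not allowable'
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

def lucas(n):
    # 2, 1, 3, 4, 7, 11, 18, ...
    if n < 0:
        return 'Negative values are not allowable'
    a, b = 2, 1
    for _ in range(n):
        a, b = b, a + b
    return a

def non_fibonacci_lucas(n, first, second):
    # Same recurrence, caller-supplied seed values.
    if n < 0:
        return 'Negative values are not allowable'
    a, b = first, second
    for _ in range(n):
        a, b = b, a + b
    return a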
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 09:46:47 2020
@author: Carlos Jose Munoz
"""
# the model and the view are imported so the controller can keep them communicating
from Modelo import ventanadentrada
from Vista import Ventanainicio,dosventana
import sys
from PyQt5.QtWidgets import QApplication
class Controlador(object): # object that receives the commands from the interface and forwards them to the model so the required action is carried out
    def __init__(self, vista,modelo,vista2):
        self._mi_vista=vista # attribute for opening the first window
        self._mi_modelo= modelo # opening of the model
        self._mi2_ventana=vista2 # opening of the second window
    def recibirruta(self, r): # receives the file path and passes it on to the model
        self._mi_modelo.recibirruta(r)
    def recibirtipodearchivo(self, tipefile): # receives the file type so the window switch can be made
        self._mi_modelo.recibirtipodearchivo(tipefile)
    def loadsignals(self,l): # returns the initial time values according to the signal type
        mini, maxi=self._mi_modelo.loadsignals(l)
        return mini, maxi
    def graph(self,ch,m,mx): # returns the signal (all channels or a single one) and the time values to be plotted
        senal= self._mi_modelo.graph(ch,m,mx)
        return senal
    def filtrar(self,ch,tr,tw,tt): # returns the original signal (channel) and the filtered signal the model produces, depending on the filter type
        senal, senalfiltrada= self._mi_modelo.filtrar(ch,tr,tw,tt)
        return senal, senalfiltrada
    def guardarfil(self,ch,archivo): # receives the path, file name and channel for saving the filtered signal
        self._mi_modelo.guardarfil(ch,archivo)
    def esposible(self): # enables the save-filtered-signal button
        return self._mi_modelo.possiblesave
if __name__ == '__main__': # program entry point; this is the main program that is run
    app=QApplication(sys.argv)
    mi_vista=Ventanainicio(); # object tied to the initial window
    mi_modelo=ventanadentrada(); # object tied to the model
    mi_2vista=dosventana(); # object tied to the visualisation window
    mi_controlador= Controlador(mi_vista,mi_modelo,mi_2vista) # object that links the windows to the models
    # hand the controller to the view
    mi_vista.asignarcontrolador(mi_controlador) # used to make the link between the view and the controller
    mi_vista.show() # creates the initial window
    app.exec_();
    if (mi_modelo.changepage==1): # if moving to the second window is possible, the secondary window is created
        mi_2vista.asignarcontrolador(mi_controlador)
        mi_2vista.show();
        sys.exit(app.exec_());
|
normal
|
{
"blob_id": "3329db63552592aabb751348efc5d983f2cc3f36",
"index": 1828,
"step-1": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n <mask token>\n <mask token>\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n <mask token>\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n\n def guardarfil(self, ch, archivo):\n self._mi_modelo.guardarfil(ch, archivo)\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n\n def guardarfil(self, ch, archivo):\n self._mi_modelo.guardarfil(ch, archivo)\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mi_vista = Ventanainicio()\n mi_modelo = ventanadentrada()\n mi_2vista = dosventana()\n mi_controlador = Controlador(mi_vista, mi_modelo, mi_2vista)\n mi_vista.asignarcontrolador(mi_controlador)\n mi_vista.show()\n app.exec_()\n if mi_modelo.changepage == 1:\n mi_2vista.asignarcontrolador(mi_controlador)\n mi_2vista.show()\n sys.exit(app.exec_())\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 21 09:46:47 2020\n\n@author: Carlos Jose Munoz\n\"\"\"\n# se importa el modelo y vista para que sesten comunicados por medio del controlador \nfrom Modelo import ventanadentrada\nfrom Vista import Ventanainicio,dosventana\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication \n\nclass Controlador(object): #objeto que va a recibir los comandos de la interfaz para enviarselos al modelo y desarrollar la accion necesaria \n def __init__(self, vista,modelo,vista2): \n self._mi_vista=vista #atributo para la apertura de la primera ventana \n self._mi_modelo= modelo #apertura del modelo \n self._mi2_ventana=vista2 #apertura de la segunda ventana \n \n def recibirruta(self, r): #recibe la ruta del archivo y se la pasa al modelo \n self._mi_modelo.recibirruta(r)\n \n def recibirtipodearchivo(self, tipefile): #recibe el tipo de archivo para poder hacer el cambio de ventana\n self._mi_modelo.recibirtipodearchivo(tipefile)\n \n \n def loadsignals(self,l):#devuelve los valores iniciales de tiempo segun el tipo de señal \n mini, maxi=self._mi_modelo.loadsignals(l)\n return mini, maxi\n \n def graph(self,ch,m,mx): #retorna la señal (todos o un solo canal) y los valores de tiempo que se vana graficar \n senal= self._mi_modelo.graph(ch,m,mx)\n return senal\n \n def filtrar(self,ch,tr,tw,tt): #retorna la señal (canal) original y la señal filtrada que devuelve el modelo dependiendo del tipo del filtro \n senal, senalfiltrada= self._mi_modelo.filtrar(ch,tr,tw,tt)\n return senal, senalfiltrada\n \n def guardarfil(self,ch,archivo): #recibe la ruta, nombre de archivo y canal para guardar la señal filtrada \n self._mi_modelo.guardarfil(ch,archivo)\n \n def esposible(self): #habilita el botón de guardar señal filtrada \n return self._mi_modelo.possiblesave\n \nif __name__ == '__main__': #inicio del programa, es el programa principal que se corre \n app=QApplication(sys.argv)\n mi_vista=Ventanainicio(); #objeto asociado a la ventana inicial \n mi_modelo=ventanadentrada();# objeto asociado al modelo \n mi_2vista=dosventana(); #objeto asociado a la ventana de visualizacion \n mi_controlador= Controlador(mi_vista,mi_modelo,mi_2vista)# objeto que enlaza las ventanas con los modelos \n \n #asignarle el controlador a la vista\n mi_vista.asignarcontrolador(mi_controlador) #se usa para realizar el enlace entre la vista y el controlador \n \n mi_vista.show() #genera la ventana inicial \n \n \n app.exec_();\n if (mi_modelo.changepage==1): #si es posible pasar a la segunda ventana se genera la ventana secundaria \n \n mi_2vista.asignarcontrolador(mi_controlador)\n \n mi_2vista.show();\n sys.exit(app.exec_());\n\n\n \n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
# Lahman.py
# Convert to/from web native JSON and Python/RDB types.
import json
# Include Flask packages
from flask import Flask
from flask import request
import copy
import SimpleBO
# The main program that executes. This call creates an instance of a
# class and the constructor starts the runtime.
app = Flask(__name__)
def parse_and_print_args():
fields = None
in_args = None
if request.args is not None:
in_args = dict(copy.copy(request.args))
        fields = copy.copy(in_args.get('fields', None))
        if fields:
            del in_args['fields']
        offset = copy.copy(in_args.get('offset', None))
        if offset:
            del in_args['offset']
        limit = copy.copy(in_args.get('limit', None))
        if limit:
            del in_args['limit']
try:
if request.data:
body = json.loads(request.data)
else:
body = None
except Exception as e:
print("exception here is: ", e)
body = None
print("Request.args : ", json.dumps(in_args))
    return in_args, fields, body, limit, offset
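# Note: parse_and_print_args returns (in_args, fields, body, limit, offset);
# the handlers below unpack the tuple in that order.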
@app.route('/api/<resource>',methods = ['GET','POST'])
def Basic_resource(resource):
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_by_template(resource,in_args,fields,limit,offset)
url = request.url
url_root = request.url_root
links = SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)
output=[{"data":result,
"links":links}]
return json.dumps(output), 200, \
{"content-type": "application/json; charset:utf-8"}
elif request.method == 'POST':
result = SimpleBO.Insert(resource,body)
return result
else:
return "Method " + request.method + " on resource " + resource + \
" not implemented!", 501, {"content-type": "text/plain; charset: utf-8"}
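# Example request (a sketch; actual table names depend on the Lahman schema
# exposed by SimpleBO): GET /api/people?fields=nameFirst,nameLast&limit=10&offset=0
# returns [{"data": [...], "links": [...]}] with pagination links.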
@app.route('/api/<resource>/<primary_key>',methods = ['GET','PUT','DELETE'])
def Specific_resource(resource,primary_key):
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_by_primary_key(resource,primary_key,fields)
return json.dumps(result), 200, \
{"content-type": "application/json; charset:utf-8"}
elif request.method == 'PUT':
result = SimpleBO.Update(resource,body,primary_key)
return json.dumps(result), 200, \
{"content-type": "application/json; charset:utf-8"}
elif request.method == 'DELETE':
result = SimpleBO.Delete(resource,primary_key)
return result
else:
return "Method " + request.method + " on resource " + resource + \
" not implemented!", 501, {"content-type": "text/plain; charset: utf-8"}
@app.route('/api/<resource>/<primary_key>/<related_resource>',methods = ['GET','POST'])
def related_resource(resource,primary_key,related_resource):
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_by_fk(resource,primary_key,related_resource,in_args,fields,limit,offset)
url = request.url
url_root = request.url_root
all_resource = resource+"/"+primary_key+"/"+related_resource
links=SimpleBO.generate_links(url,url_root,all_resource,in_args,fields,offset,limit,result)
output=[{"data":result,
"links":links}]
return json.dumps(output), 200, \
{"content-type": "application/json; charset:utf-8"}
elif request.method == 'POST':
result = SimpleBO.Insert(related_resource,body)
return json.dumps(result), 200, \
{"content-type": "application/json; charset:utf-8"}
else:
return "Method " + request.method + " on resource " + resource + \
" not implemented!", 501, {"content-type": "text/plain; charset: utf-8"}
@app.route('/api/teammates/<playerid>', methods=['GET'])
def get_teammates(playerid):
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_teammates(playerid,limit,offset)
url = request.url
url_root = request.url_root
resource = 'teammates/'+playerid
links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)
output=[{"data":result,
"links":links}]
return json.dumps(output), 200, \
{"content-type": "application/json; charset:utf-8"}
@app.route('/api/people/<playerid>/career_stats', methods=['GET'])
def get_career_stats(playerid):
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_career_stats(playerid,limit,offset)
url = request.url
url_root = request.url_root
resource = 'people/'+playerid+'/career_stats'
links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)
output=[{"data":result,
"links":links}]
return json.dumps(output), 200, \
{"content-type": "application/json; charset:utf-8"}
else:
return "Method " + request.method + " on resource " + resource + \
" not implemented!", 501, {"content-type": "text/plain; charset: utf-8"}
@app.route('/api/roster', methods=['GET'])
def get_roster():
    in_args, fields, body, limit, offset = parse_and_print_args()
if request.method == 'GET':
result = SimpleBO.find_roster(in_args,limit,offset)
url = request.url
url_root = request.url_root
resource = 'roster'
links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)
output=[{"data":result,
"links":links}]
return json.dumps(output), 200, \
{"content-type": "application/json; charset:utf-8"}
else:
return "Method " + request.method + " on resource " + resource + \
" not implemented!", 501, {"content-type": "text/plain; charset: utf-8"}
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "d03a8076b77851ae4df5cf657ff898eb132c49c3",
"index": 5616,
"step-1": "<mask token>\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\[email protected]('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = 
SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\[email protected]('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\[email protected]('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = 
SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\[email protected]('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\[email protected]('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 
'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\[email protected]('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "import json\nfrom flask import Flask\nfrom flask import request\nimport copy\nimport SimpleBO\napp = Flask(__name__)\n\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields', None))\n if fields:\n del in_args['fields']\n offset = copy.copy(in_args.get('offset', None))\n if offset:\n del in_args['offset']\n limit = copy.copy(in_args.get('limit', None))\n if limit:\n del in_args['limit']\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print('exception here is: ', e)\n body = None\n print('Request.args : ', json.dumps(in_args))\n return in_args, fields, body, limit, offset\n\n\[email protected]('/api/<resource>', methods=['GET', 'POST'])\ndef Basic_resource(resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource, in_args, fields, limit,\n offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource, body)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>', methods=['GET', 'PUT', 'DELETE'])\ndef Specific_resource(resource, primary_key):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource, primary_key, fields)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource, body, primary_key)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource, primary_key)\n return result\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/<resource>/<primary_key>/<related_resource>', methods=[\n 'GET', 'POST'])\ndef related_resource(resource, primary_key, related_resource):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource, primary_key,\n related_resource, in_args, fields, limit, offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource + '/' + primary_key + '/' + related_resource\n links = SimpleBO.generate_links(url, url_root, all_resource,\n in_args, fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource, body)\n return json.dumps(result), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n 
in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/' + playerid\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n\n\[email protected]('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/' + playerid + '/career_stats'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\[email protected]('/api/roster', methods=['GET'])\ndef get_roster():\n in_args, fields, body, offset, limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args, limit, offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links = SimpleBO.generate_links(url, url_root, resource, in_args,\n fields, offset, limit, result)\n output = [{'data': result, 'links': links}]\n return json.dumps(output), 200, {'content-type':\n 'application/json; charset:utf-8'}\n else:\n return ('Method ' + request.method + ' on resource ' + resource +\n ' not implemented!', 501, {'content-type':\n 'text/plain; charset: utf-8'})\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "# Lahman.py\n\n# Convert to/from web native JSON and Python/RDB types.\nimport json\n\n# Include Flask packages\nfrom flask import Flask\nfrom flask import request\nimport copy\n\nimport SimpleBO\n\n# The main program that executes. This call creates an instance of a\n# class and the constructor starts the runtime.\napp = Flask(__name__)\n\ndef parse_and_print_args():\n fields = None\n in_args = None\n if request.args is not None:\n in_args = dict(copy.copy(request.args))\n fields = copy.copy(in_args.get('fields',None))\n if fields:\n del(in_args['fields'])\n offset = copy.copy(in_args.get('offset',None))\n if offset:\n del(in_args['offset'])\n limit = copy.copy(in_args.get('limit',None))\n if limit:\n del(in_args['limit'])\n try:\n if request.data:\n body = json.loads(request.data)\n else:\n body = None\n except Exception as e:\n print(\"exception here is: \", e)\n body = None\n\n\n\n print(\"Request.args : \", json.dumps(in_args))\n return in_args,fields,body,limit,offset\n\n\n\n\n\[email protected]('/api/<resource>',methods = ['GET','POST'])\ndef Basic_resource(resource):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_template(resource,in_args,fields,limit,offset)\n url = request.url\n url_root = request.url_root\n links = SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'POST':\n result = SimpleBO.Insert(resource,body)\n return result\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\[email protected]('/api/<resource>/<primary_key>',methods = ['GET','PUT','DELETE'])\ndef Specific_resource(resource,primary_key):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_primary_key(resource,primary_key,fields)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'PUT':\n result = SimpleBO.Update(resource,body,primary_key)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'DELETE':\n result = SimpleBO.Delete(resource,primary_key)\n return result\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\[email protected]('/api/<resource>/<primary_key>/<related_resource>',methods = ['GET','POST'])\ndef related_resource(resource,primary_key,related_resource):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_by_fk(resource,primary_key,related_resource,in_args,fields,limit,offset)\n url = request.url\n url_root = request.url_root\n all_resource = resource+\"/\"+primary_key+\"/\"+related_resource\n links=SimpleBO.generate_links(url,url_root,all_resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n elif request.method == 'POST':\n result = SimpleBO.Insert(related_resource,body)\n return json.dumps(result), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return 
\"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\[email protected]('/api/teammates/<playerid>', methods=['GET'])\ndef get_teammates(playerid):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_teammates(playerid,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'teammates/'+playerid\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\[email protected]('/api/people/<playerid>/career_stats', methods=['GET'])\ndef get_career_stats(playerid):\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_career_stats(playerid,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'people/'+playerid+'/career_stats'\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\[email protected]('/api/roster', methods=['GET'])\ndef get_roster():\n\n in_args,fields,body,offset,limit = parse_and_print_args()\n if request.method == 'GET':\n result = SimpleBO.find_roster(in_args,limit,offset)\n url = request.url\n url_root = request.url_root\n resource = 'roster'\n links=SimpleBO.generate_links(url,url_root,resource,in_args,fields,offset,limit,result)\n output=[{\"data\":result,\n \"links\":links}]\n return json.dumps(output), 200, \\\n {\"content-type\": \"application/json; charset:utf-8\"}\n\n else:\n return \"Method \" + request.method + \" on resource \" + resource + \\\n \" not implemented!\", 501, {\"content-type\": \"text/plain; charset: utf-8\"}\n\nif __name__ == '__main__':\n app.run()\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import string
# Use cryptographic-safe random generator as provided by the OS.
random_generator = random.SystemRandom()
def string_id(length=8):
""" Generate Random ID.
    Random ID contains ASCII letters and digits.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
    return ''.join(random_generator.choice(string.ascii_letters +
                                           string.digits)
                   for _ in range(length))
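# Illustrative usage (a sketch; output is random on every call):
#   string_id()   -> e.g. 'aZ3kQ9xb' (8 characters)
#   string_id(16) -> a 16-character alphanumeric token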
# Request ID Counter
####################
req_c = None
pid = None
def request_id():
    # Using random here would be comparatively slow; this is much quicker:
    # it caches the process id and only increments a counter per request.
    #
    # The result may not be as unique, but it is highly unlikely to collide
    # with recent request ids.
global req_c, pid
if req_c is None:
req_c = random.randint(1000*1000, 1000*1000*1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
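# Example shape of the result (values vary by process and call count):
# '12345-3b9aca01', i.e. '<pid>-<last 8 hex digits of the counter>'.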
|
normal
|
{
"blob_id": "cdbf9427d48f0a5c53b6efe0de7dfea65a8afd83",
"index": 87,
"step-1": "<mask token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-2": "<mask token>\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\n<mask token>\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-3": "<mask token>\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-4": "import os\nimport random\nimport string\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters + string.digits) for\n _ in range(length))\n\n\nreq_c = None\npid = None\n\n\ndef request_id():\n global req_c, pid\n if req_c is None:\n req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)\n if pid is None:\n pid = str(os.getpid())\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n return pid + '-' + req_id\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\nimport os\nimport random\nimport string\n\n# Use cryptographic-safe random generator as provided by the OS.\nrandom_generator = random.SystemRandom()\n\n\ndef string_id(length=8):\n \"\"\" Generate Random ID.\n\n Random ID contains ascii letters and digitis.\n\n Args:\n length (int): Character length of id.\n\n Returns:\n Random id string.\n \"\"\"\n return ''.join(random.choice(string.ascii_letters +\n string.digits)\n for _ in range(length))\n\n\n# Request ID Counter\n####################\n\nreq_c = None\npid = None\n\n\ndef request_id():\n # Using random is pretty slow. This is way quicker.\n # It uses cached proc id. Then only does this append counter.\n # per request...\n #\n # It may not be as unique, but highly unlikely to collide\n # with recent requet ids.\n global req_c, pid\n\n if req_c is None:\n req_c = random.randint(1000*1000, 1000*1000*1000)\n\n if pid is None:\n pid = str(os.getpid())\n\n req_id = req_c = req_c + 1\n req_id = hex(req_id)[2:].zfill(8)[-8:]\n\n return pid + '-' + req_id\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import numpy as np
from datetime import timedelta
import scipy.optimize as optim
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gen_utils.gen_io import read_run_params,log_msg
#############################################
params = read_run_params()
run = params["current_run"]
out_home = params["container"]+"output/"
out_dir = out_home+run+"/"
df = pd.read_csv(out_dir+"4_mcov_strain_variant_map_covid_pangolin_db_input_"+run+".csv")
df = df[df.quality=="HQ"]
#########################
tag="B.1.617.Family"
voi=["B.1.617.2","AY.2","AY.3"]
start_date = "4-15-2021"
end_date = "7-20-2021"
days_since="4/15/2021"
days= 180
# voi="P.1"
# start_date = "1-1-2021"
# end_date = "6-20-2021"
# days_since="1/1/2021"
# days= 360
#################################
###take unique patients with variant
keep_mrns_variant = np.unique(df[df.variant.isin(voi)]["MRN"])
df_mrns = df[df.MRN.isin(keep_mrns_variant)]
df_mrns = df_mrns[df_mrns.variant.isin(voi)] ### important step -- drop rows whose lineage is not a variant of interest
df_mrns.sort_values("COLLECTION_DT",inplace=True)
df_mrns.drop_duplicates("MRN",keep="first",inplace=True)
keep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]["MRN"])
df_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]
df_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin(voi)]
df_mrns_not_variant.sort_values("COLLECTION_DT",inplace=True)
df_mrns_not_variant.drop_duplicates("MRN",keep="first",inplace=True)
df_2 = df_mrns.append(df_mrns_not_variant)
df_2.drop_duplicates("MRN",keep="first",inplace=True)
df = df_2
df=df[['MCoVNumber','COLLECTION_DT','variant']]
#####################################
df.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)
df.COLLECTION_DT = df.COLLECTION_DT.dt.date
df = df[ ( (df.COLLECTION_DT>=pd.to_datetime(start_date)) &
(df.COLLECTION_DT<pd.to_datetime(end_date))
)
]
df.sort_values("COLLECTION_DT",inplace=True)
df.variant.fillna(0,inplace=True)
#########################
df.variant = [1 if x in voi else 0 for x in df.variant]
df_variant = df.groupby("COLLECTION_DT")["variant"].agg("sum").reset_index()
df_count = df.groupby("COLLECTION_DT")["variant"].agg("count").reset_index()
# One row per calendar day; the positional alignment with the groupby results
# below assumes every day in the range has at least one sequenced sample.
dates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max(), freq='d')
df_data = pd.DataFrame(dates)
df_data.columns=["dates"]
df_data["date_step"]= [x for x in range(1,df_data.shape[0]+1,1)]
df_data["total"] = df_count.variant
df_data["variant"] = df_variant.variant
df_data["variant_csum"] = np.cumsum(df_variant.variant.values)
df_data["variant_percent"]=[ (x/y)*100 for x,y in zip(df_data.variant,df_data.total)]
df_data.to_excel("final_Data_"+tag+"_log_growth_6_28_2021.xlsx",index=False)
def my_logistic(x,a,b,c):
return c/(1 + a * np.exp(-b*x))
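# In this parameterization c is the upper asymptote (maximum percent), b the
# per-day growth rate, and a the initial offset: at x = 0 the curve equals
# c / (1 + a).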
x = np.array(df_data.date_step)
# y = np.array(df_data.variant_csum)
y = np.array(df_data.variant_percent)
##########optimize
po = np.random.exponential(size=3)
bounds = (0,[1000.,2.0,100.])
(a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
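# The bounds keep all parameters non-negative and cap b at 2.0/day and the
# asymptote c at 100 (y is a percentage); p0 is a random positive start point.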
# for i in range(1,20,1):
# try:
# # po = np.array([250.,0.10,99.])
# po= np.random.exponential(size=3)
# bounds = ([0.,0.1,0.],[1000.,float(i),100.])
# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
# print(c)
# except:
# print("error for " + str(i))
# po = np.array([250.,0.10,99.])
# bounds = ([0.,0.1,99.],[1000.,1.0,100.])
# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
plt.scatter(x,y)
plt.plot(x,my_logistic(x,a,b,c))
xprime = np.array([x for x in range(1,170,1)])
yprime = my_logistic(xprime,a,b,c)
plt.plot(xprime,yprime)
plt.savefig("log_fit_best_fit"+tag+".png")
plt.close()
############################## method 2: t-distribution CIs on the fitted parameters --> prefer this one
from scipy.stats.distributions import t
pars, pcov = (a,b,c),cov
alpha = 0.05 # 95% confidence interval = 100*(1-alpha)
n = len(y) # number of data points
p = len(pars) # number of parameters
dof = max(0, n - p) # number of degrees of freedom
# student-t value for the dof and confidence level
tval = t.ppf(1.0-alpha/2., dof)
val_dw = 0
val_up = 0
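# 95% CI per parameter: estimate +/- tval * sqrt(diag(pcov)); the bounds on
# b (index 1, the growth rate) are kept to draw the prediction band below.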
for i, p,var in zip(range(n), pars, np.diag(pcov)):
sigma = var**0.5
if i==1:
val_dw = p - sigma*tval
val_up = p + sigma*tval
print ('p{0}: {1} [{2} {3}]'.format(i, p,
p - sigma*tval,
p + sigma*tval))
plt.plot(x,y,'bo',markersize=5,label='Observed')
xprime = np.array([x for x in range(1,days,1)])
yprime = my_logistic(xprime,a,b,c)
plt.plot(xprime,yprime,label='Predicted')
xpred = np.array([x for x in range(1,days,1)])
ypred_dw = my_logistic(xpred,pars[0],val_dw,pars[2])
ypred_up = my_logistic(xpred,pars[0],val_up,pars[2])
plt.fill_between(xpred, ypred_up,ypred_dw,color = 'k', alpha = 0.1,label='95% CI')
plt.title("Logistic growth model ["+tag+"]",fontsize=18)
plt.xlabel("Days since "+days_since,fontsize=15)
plt.ylabel("Percent of patients ",fontsize=15)
plt.legend()
plt.savefig("log_pred_best_fit"+tag+".png")
plt.close()
# Doubling time via the rule of 70: dt = 70 / (growth rate in percent per day)
gr = b; dt = 70/(gr*100); print(dt)       # point estimate
gr = val_up; dt = 70/(gr*100); print(dt)  # upper CI bound on b (fastest doubling)
gr = val_dw; dt = 70/(gr*100); print(dt)  # lower CI bound on b (slowest doubling)
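# Worked example of the rule of 70: a fitted rate of b = 0.10/day is 10%/day,
# so the doubling time is dt = 70 / 10 = 7 days.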
|
normal
|
{
"blob_id": "dcef5f34a62939d992a109e991552e612bf5bad5",
"index": 4619,
"step-1": "<mask token>\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\n<mask token>\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\n<mask token>\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\n<mask token>\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\n<mask token>\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, label='Observed')\n<mask token>\nplt.plot(xprime, yprime, label='Predicted')\n<mask token>\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\n<mask token>\nprint(dt)\n<mask token>\nprint(dt)\n<mask token>\nprint(dt)\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nparams = read_run_params()\nrun = params['current_run']\nout_home = params['container'] + 'output/'\nout_dir = out_home + run + '/'\ndf = pd.read_csv(out_dir +\n '4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')\ndf = df[df.quality == 'HQ']\ntag = 'B.1.617.Family'\nvoi = ['B.1.617.2', 'AY.2', 'AY.3']\nstart_date = '4-15-2021'\nend_date = '7-20-2021'\ndays_since = '4/15/2021'\ndays = 180\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)]\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin\n (voi)]\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\ndf = df_2\ndf = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\ndf = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.\n COLLECTION_DT < pd.to_datetime(end_date))]\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\ndf.variant = [(1 if x in voi else 0) for x in df.variant]\ndf_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()\ndf_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()\ndates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +\n timedelta(days=1) - timedelta(days=1), freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns = ['dates']\ndf_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]\ndf_data['total'] = df_count.variant\ndf_data['variant'] = df_variant.variant\ndf_data['variant_csum'] = np.cumsum(df_variant.variant.values)\ndf_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,\n df_data.total)]\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\nx = np.array(df_data.date_step)\ny = np.array(df_data.variant_percent)\npo = np.random.exponential(size=3)\nbounds = 0, [1000.0, 2.0, 100.0]\n(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\nxprime = np.array([x for x in range(1, 170, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\n<mask token>\npars, pcov = (a, b, c), cov\nalpha = 0.05\nn = len(y)\np = len(pars)\ndof = max(0, n - p)\ntval = t.ppf(1.0 - alpha / 2.0, dof)\nval_dw = 0\nval_up = 0\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, label='Observed')\nxprime = np.array([x for x in range(1, days, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime, label='Predicted')\nxpred = np.array([x for x in range(1, days, 1)])\nypred_dw = my_logistic(xpred, pars[0], val_dw, 
pars[2])\nypred_up = my_logistic(xpred, pars[0], val_up, pars[2])\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\ngr = b\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_up\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_dw\ndt = 70 / (gr * 100)\nprint(dt)\n",
"step-4": "import pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport scipy.optimize as optim\nfrom scipy import stats\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom gen_utils.gen_io import read_run_params, log_msg\nparams = read_run_params()\nrun = params['current_run']\nout_home = params['container'] + 'output/'\nout_dir = out_home + run + '/'\ndf = pd.read_csv(out_dir +\n '4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')\ndf = df[df.quality == 'HQ']\ntag = 'B.1.617.Family'\nvoi = ['B.1.617.2', 'AY.2', 'AY.3']\nstart_date = '4-15-2021'\nend_date = '7-20-2021'\ndays_since = '4/15/2021'\ndays = 180\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)]\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin\n (voi)]\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\ndf = df_2\ndf = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\ndf = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.\n COLLECTION_DT < pd.to_datetime(end_date))]\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\ndf.variant = [(1 if x in voi else 0) for x in df.variant]\ndf_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()\ndf_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()\ndates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +\n timedelta(days=1) - timedelta(days=1), freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns = ['dates']\ndf_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]\ndf_data['total'] = df_count.variant\ndf_data['variant'] = df_variant.variant\ndf_data['variant_csum'] = np.cumsum(df_variant.variant.values)\ndf_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,\n df_data.total)]\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\nx = np.array(df_data.date_step)\ny = np.array(df_data.variant_percent)\npo = np.random.exponential(size=3)\nbounds = 0, [1000.0, 2.0, 100.0]\n(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\nxprime = np.array([x for x in range(1, 170, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\nfrom scipy.stats.distributions import t\npars, pcov = (a, b, c), cov\nalpha = 0.05\nn = len(y)\np = len(pars)\ndof = max(0, n - p)\ntval = t.ppf(1.0 - alpha / 2.0, dof)\nval_dw = 0\nval_up = 0\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, 
label='Observed')\nxprime = np.array([x for x in range(1, days, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime, label='Predicted')\nxpred = np.array([x for x in range(1, days, 1)])\nypred_dw = my_logistic(xpred, pars[0], val_dw, pars[2])\nypred_up = my_logistic(xpred, pars[0], val_up, pars[2])\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\ngr = b\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_up\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_dw\ndt = 70 / (gr * 100)\nprint(dt)\n",
"step-5": "import pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport scipy.optimize as optim\nfrom scipy import stats\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom gen_utils.gen_io import read_run_params,log_msg\n\n\n\n#############################################\n\nparams = read_run_params()\nrun = params[\"current_run\"]\nout_home = params[\"container\"]+\"output/\" \nout_dir = out_home+run+\"/\"\n\ndf = pd.read_csv(out_dir+\"4_mcov_strain_variant_map_covid_pangolin_db_input_\"+run+\".csv\")\ndf = df[df.quality==\"HQ\"]\n\n\n \n#########################\ntag=\"B.1.617.Family\"\nvoi=[\"B.1.617.2\",\"AY.2\",\"AY.3\"]\nstart_date = \"4-15-2021\"\nend_date = \"7-20-2021\"\ndays_since=\"4/15/2021\"\ndays= 180\n\n# voi=\"P.1\"\n# start_date = \"1-1-2021\"\n# end_date = \"6-20-2021\"\n# days_since=\"1/1/2021\"\n# days= 360\n#################################\n\n\n###take unique patients with variant\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)][\"MRN\"])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)] ###important step--remove non b117 variant \ndf_mrns.sort_values(\"COLLECTION_DT\",inplace=True)\ndf_mrns.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\n\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)][\"MRN\"])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin(voi)]\ndf_mrns_not_variant.sort_values(\"COLLECTION_DT\",inplace=True)\ndf_mrns_not_variant.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\ndf = df_2\n\n\ndf=df[['MCoVNumber','COLLECTION_DT','variant']]\n\n#####################################\n\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\n\n\ndf = df[ ( (df.COLLECTION_DT>=pd.to_datetime(start_date)) &\n (df.COLLECTION_DT<pd.to_datetime(end_date)) \n )\n ]\ndf.sort_values(\"COLLECTION_DT\",inplace=True)\n\ndf.variant.fillna(0,inplace=True)\n#########################\n\ndf.variant = [1 if x in voi else 0 for x in df.variant]\n\n\ndf_variant = df.groupby(\"COLLECTION_DT\")[\"variant\"].agg(\"sum\").reset_index()\ndf_count = df.groupby(\"COLLECTION_DT\")[\"variant\"].agg(\"count\").reset_index()\n\ndates = pd.date_range(df.COLLECTION_DT.min(), (df.COLLECTION_DT.max() + timedelta(days=1) )-timedelta(days=1),freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns=[\"dates\"]\ndf_data[\"date_step\"]= [x for x in range(1,df_data.shape[0]+1,1)]\ndf_data[\"total\"] = df_count.variant\ndf_data[\"variant\"] = df_variant.variant\ndf_data[\"variant_csum\"] = np.cumsum(df_variant.variant.values)\ndf_data[\"variant_percent\"]=[ (x/y)*100 for x,y in zip(df_data.variant,df_data.total)]\ndf_data.to_excel(\"final_Data_\"+tag+\"_log_growth_6_28_2021.xlsx\",index=False)\n\ndef my_logistic(x,a,b,c):\n return c/(1 + a * np.exp(-b*x))\n\nx = np.array(df_data.date_step)\n# y = np.array(df_data.variant_csum)\ny = np.array(df_data.variant_percent)\n\n##########optimize\npo = np.random.exponential(size=3)\nbounds = (0,[1000.,2.0,100.])\n(a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n\n# for i in range(1,20,1):\n# try:\n# # po = np.array([250.,0.10,99.])\n# po= np.random.exponential(size=3)\n# bounds = ([0.,0.1,0.],[1000.,float(i),100.])\n# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n# 
print(c)\n# except:\n# print(\"error for \" + str(i))\n\n# po = np.array([250.,0.10,99.])\n# bounds = ([0.,0.1,99.],[1000.,1.0,100.])\n# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n\nplt.scatter(x,y)\nplt.plot(x,my_logistic(x,a,b,c))\nxprime = np.array([x for x in range(1,170,1)])\nyprime = my_logistic(xprime,a,b,c)\nplt.plot(xprime,yprime)\nplt.savefig(\"log_fit_best_fit\"+tag+\".png\")\nplt.close()\n\n\n############################## method 2 using t distribution on error --> perfer this one \n\nfrom scipy.stats.distributions import t\n\npars, pcov = (a,b,c),cov\n\nalpha = 0.05 # 95% confidence interval = 100*(1-alpha)\n\nn = len(y) # number of data points\np = len(pars) # number of parameters\n\ndof = max(0, n - p) # number of degrees of freedom\n\n# student-t value for the dof and confidence level\ntval = t.ppf(1.0-alpha/2., dof) \n\nval_dw = 0\nval_up = 0\nfor i, p,var in zip(range(n), pars, np.diag(pcov)):\n sigma = var**0.5\n \n if i==1:\n val_dw = p - sigma*tval\n val_up = p + sigma*tval\n\n print ('p{0}: {1} [{2} {3}]'.format(i, p,\n p - sigma*tval,\n p + sigma*tval))\n\n\n\nplt.plot(x,y,'bo',markersize=5,label='Observed')\nxprime = np.array([x for x in range(1,days,1)])\nyprime = my_logistic(xprime,a,b,c)\nplt.plot(xprime,yprime,label='Predicted')\n\nxpred = np.array([x for x in range(1,days,1)])\nypred_dw = my_logistic(xpred,pars[0],val_dw,pars[2])\nypred_up = my_logistic(xpred,pars[0],val_up,pars[2])\n\nplt.fill_between(xpred, ypred_up,ypred_dw,color = 'k', alpha = 0.1,label='95% CI')\n\nplt.title(\"Logistic growth model [\"+tag+\"]\",fontsize=18)\nplt.xlabel(\"Days since \"+days_since,fontsize=15)\nplt.ylabel(\"Percent of patients \",fontsize=15)\n\nplt.legend()\nplt.savefig(\"log_pred_best_fit\"+tag+\".png\")\nplt.close()\n\n\ngr=b;dt = 70/(gr*100);print(dt)\ngr=val_up;dt = 70/(gr*100);print(dt)\ngr=val_dw;dt = 70/(gr*100);print(dt)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""1) Написать бота-консультанта, который будет собирать информацию с
пользователя (его ФИО, номер телефона, почта, адресс, пожелания).
Записывать сформированную заявку в БД (по желанию SQl/NOSQL).)."""
import telebot
from .config import TOKEN
from telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton
from mongoengine import *
bot = telebot.TeleBot(TOKEN)
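# NOTE: a single module-level dict shared by all chats; with concurrent
# users their answers would overwrite each other (kept as in the original).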
data = {}
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def begin_survey(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
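
# A minimal verification sketch (assumption: the same 'bot_db' connection
# is open); iterating Document.objects is standard mongoengine:
# for user in User.objects:
#     print(user.surname, user.name, user.phone)
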
bot.polling()
|
normal
|
{
"blob_id": "dcb2351f9489815fbec8694b446d0a93972a6590",
"index": 6388,
"step-1": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n<mask token>\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\n<mask token>\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\n<mask token>\n",
"step-3": "<mask token>\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-4": "<mask token>\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-5": "\"\"\"1) Написать бота-консультанта, который будет собирать информацию с\nпользователя (его ФИО, номер телефона, почта, адресс, пожелания).\nЗаписывать сформированную заявку в БД (по желанию SQl/NOSQL).).\"\"\"\n\n\nimport telebot\nfrom .config import TOKEN\nfrom telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\nfrom mongoengine import *\n\n\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
"""
Counted List
Create a class for a list-like object based on the UserList wrapper
https://docs.python.org/3/library/collections.html#collections.UserList
That object should have a method to return a Counter
https://docs.python.org/3/library/collections.html#collections.Counter
for all objects in the list
Counter should be updated automatically for at least 2 methods (append, pop)
"""
# example to test code
# class Example(UserList)
# ...
#
# x = Example(['1', '2', '3'])
# y = x.get_counter() # y contains Counter({'1':1, '2':1 '3':1})
# x.append(3)
# now y contains Counter({'1':1, '2':1 '3':2})
from collections import UserList,Counter
class CountedList(UserList):
def Count(self):
self.cnt=Counter(self.data)
return self.cnt
def append(self, item):
super(CountedList,self).append(item)
global y
y = self.Count()
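
    # Sketch (an addition, not in the original): the header also asks pop()
    # to keep the counter fresh; this mirrors the append() override above.
    def pop(self, index=-1):
        item = super(CountedList, self).pop(index)
        global y
        y = self.Count()
        return item
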
countedlist=CountedList(['1', '2', '3'])
y=countedlist.Count()
print(y)
countedlist.append('3')
print(y)
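
# Exercise the pop() sketch above: y should again count '3' once.
countedlist.pop()
print(y)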
|
normal
|
{
"blob_id": "1cf4fc37e030a895cb36f537ce9e92df34acfb8b",
"index": 7659,
"step-1": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-3": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-4": "<mask token>\nfrom collections import UserList, Counter\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-5": "\"\"\"\nCounted List\nCreate a class for an list like object based on UserList wrapper\nhttps://docs.python.org/3/library/collections.html#collections.UserList\nThat object should have a method to return a Counter\nhttps://docs.python.org/3/library/collections.html#collections.Counter\nfor all objects in the list\nCounter should be updated automatically for at lest 2 methods (append, pop)\n\"\"\"\n\n# example to test code\n# class Example(UserList)\n# ...\n#\n# x = Example(['1', '2', '3'])\n# y = x.get_counter() # y contains Counter({'1':1, '2':1 '3':1})\n# x.append(3)\n# now y contains Counter({'1':1, '2':1 '3':2})\n\nfrom collections import UserList,Counter\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt=Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList,self).append(item)\n global y\n y = self.Count()\n\ncountedlist=CountedList(['1', '2', '3'])\ny=countedlist.Count()\nprint(y)\n\ncountedlist.append('3')\nprint(y)\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import pylab as pb
from .. import kern
from ..core import model
from ..util.linalg import pdinv,mdot
from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango
from ..likelihoods import EP
class GP(model):
"""
Gaussian Process model for regression and EP
:param X: input observations
:param kernel: a GPy kernel, defaults to rbf+white
    :param likelihood: a GPy likelihood
:param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_X: False|True
:param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_Y: False|True
:param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
:rtype: model object
:param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
:param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
:type powerep: list
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None):
# parse arguments
self.Xslices = Xslices
self.X = X
assert len(self.X.shape)==2
self.N, self.Q = self.X.shape
assert isinstance(kernel, kern.kern)
self.kern = kernel
#here's some simple normalization for the inputs
if normalize_X:
self._Xmean = X.mean(0)[None,:]
self._Xstd = X.std(0)[None,:]
self.X = (X.copy() - self._Xmean) / self._Xstd
if hasattr(self,'Z'):
self.Z = (self.Z - self._Xmean) / self._Xstd
else:
self._Xmean = np.zeros((1,self.X.shape[1]))
self._Xstd = np.ones((1,self.X.shape[1]))
self.likelihood = likelihood
#assert self.X.shape[0] == self.likelihood.Y.shape[0]
#self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self)
def dL_dZ(self):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
return np.zeros_like(self.Z)
def _set_params(self,p):
self.kern._set_params_transformed(p[:self.kern.Nparam])
#self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)
self.K += self.likelihood.covariance_matrix
self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
#the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki,self.likelihood.Y)
self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
self.dL_dK = 0.5*(tmp - self.D*self.Ki)
def _get_params(self):
return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
def _get_param_names(self):
return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
def update_likelihood_approximation(self):
"""
Approximates a non-gaussian likelihood using Expectation Propagation
For a Gaussian (or direct: TODO) likelihood, no iteration is required:
this function does nothing
"""
self.likelihood.fit_full(self.kern.K(self.X))
self._set_params(self._get_params()) # update the GP
def _model_fit_term(self):
"""
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))
else:
return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))
def log_likelihood(self):
"""
The log marginal likelihood of the GP.
For an EP model, can be written as the log likelihood of a regression
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z
def _log_likelihood_gradients(self):
"""
The gradient of all parameters.
For the kernel parameters, use the chain rule via dL_dK
For the likelihood parameters, pass in alpha = K^-1 y
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self,_Xnew,slices=None, full_cov=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
"""
Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)
KiKx = np.dot(self.Ki,Kx)
if full_cov:
Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
var = Kxx - np.dot(KiKx.T,Kx)
else:
Kxx = self.kern.Kdiag(_Xnew, slices=slices)
var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
var = var[:,None]
return mu, var
def predict(self,Xnew, slices=None, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
Arguments
---------
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.Q
:param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
:type slices: (None, list of slice objects, list of ints)
        :param full_cov: whether to return the full covariance matrix, or just the diagonal
:type full_cov: bool
:rtype: posterior mean, a Numpy array, Nnew x self.D
:rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
:rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
.. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
- If None, the new points covary throigh every kernel part (default)
- If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
- If a list of booleans, specifying which kernel parts are active
If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
This is to allow for different normalizations of the output dimensions.
"""
#normalize X values
Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
mu, var = self._raw_predict(Xnew, slices, full_cov)
#now push through likelihood TODO
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
return mean, var, _025pm, _975pm
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
"""
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
:param samples: the number of a posteriori samples to plot
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:param which_functions: which of the kernel functions to plot (additively)
:type which_functions: list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations.
        - In two dimensions, a contour-plot shows the mean predicted function
        - In higher dimensions, we haven't implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions using which_data and which_functions
"""
if which_functions=='all':
which_functions = [True]*self.kern.Nparts
if which_data=='all':
which_data = slice(None)
if self.X.shape[1] == 1:
Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
if samples == 0:
m,v = self._raw_predict(Xnew, slices=which_functions)
gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v))
pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)
else:
m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True)
Ysim = np.random.multivariate_normal(m.flatten(),v,samples)
gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None])
for i in range(samples):
pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25)
pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)
pb.xlim(xmin,xmax)
ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None])))
ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin)
pb.ylim(ymin,ymax)
if hasattr(self,'Z'):
pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12)
elif self.X.shape[1] == 2:
resolution = resolution or 50
Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution)
m,v = self._raw_predict(Xnew, slices=which_functions)
m = m.reshape(resolution,resolution).T
pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet)
            pb.scatter(self.X[:,0],self.X[:,1],40,self.likelihood.Y.flatten(),linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max())
pb.xlim(xmin[0],xmax[0])
pb.ylim(xmin[1],xmax[1])
else:
            raise NotImplementedError("Cannot define a frame with more than two input dimensions")
def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20):
"""
        Plot the data's view of the world: non-normalized values, with GP
        predictions passed through the likelihood.

:param levels: for 2D plotting, the number of contour levels to use
"""
# TODO include samples
if which_functions=='all':
which_functions = [True]*self.kern.Nparts
if which_data=='all':
which_data = slice(None)
if self.X.shape[1] == 1:
Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now
Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)
m, var, lower, upper = self.predict(Xnew, slices=which_functions)
gpplot(Xnew,m, lower, upper)
pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5)
ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper))
ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin)
pb.xlim(xmin,xmax)
pb.ylim(ymin,ymax)
if hasattr(self,'Z'):
Zu = self.Z*self._Xstd + self._Xmean
pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12)
            if getattr(self, 'has_uncertain_inputs', False):
pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))
elif self.X.shape[1]==2: #FIXME
resolution = resolution or 50
Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution)
x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution)
m, var, lower, upper = self.predict(Xnew, slices=which_functions)
m = m.reshape(resolution,resolution).T
pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet)
Yf = self.likelihood.Y.flatten()
pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.)
pb.xlim(xmin[0],xmax[0])
pb.ylim(xmin[1],xmax[1])
if hasattr(self,'Z'):
pb.plot(self.Z[:,0],self.Z[:,1],'wo')
else:
            raise NotImplementedError("Cannot define a frame with more than two input dimensions")
|
normal
|
{
"blob_id": "2ae953d1d53c47da10ea4c8aace186eba0708ad0",
"index": 3874,
"step-1": "# Copyright (c) 2012, GPy authors (see AUTHORS.txt).\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\n\nimport numpy as np\nimport pylab as pb\nfrom .. import kern\nfrom ..core import model\nfrom ..util.linalg import pdinv,mdot\nfrom ..util.plot import gpplot,x_frame1D,x_frame2D, Tango\nfrom ..likelihoods import EP\n\nclass GP(model):\n \"\"\"\n Gaussian Process model for regression and EP\n\n :param X: input observations\n :param kernel: a GPy kernel, defaults to rbf+white\n :parm likelihood: a GPy likelihood\n :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)\n :type normalize_X: False|True\n :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)\n :type normalize_Y: False|True\n :param Xslices: how the X,Y data co-vary in the kernel (i.e. which \"outputs\" they correspond to). See (link:slicing)\n :rtype: model object\n :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1\n :param powerep: power-EP parameters [$\\eta$,$\\delta$], defaults to [1.,1.]\n :type powerep: list\n\n .. Note:: Multiple independent outputs are allowed using columns of Y\n\n \"\"\"\n def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None):\n\n # parse arguments\n self.Xslices = Xslices\n self.X = X\n assert len(self.X.shape)==2\n self.N, self.Q = self.X.shape\n assert isinstance(kernel, kern.kern)\n self.kern = kernel\n\n #here's some simple normalization for the inputs\n if normalize_X:\n self._Xmean = X.mean(0)[None,:]\n self._Xstd = X.std(0)[None,:]\n self.X = (X.copy() - self._Xmean) / self._Xstd\n if hasattr(self,'Z'):\n self.Z = (self.Z - self._Xmean) / self._Xstd\n else:\n self._Xmean = np.zeros((1,self.X.shape[1]))\n self._Xstd = np.ones((1,self.X.shape[1]))\n\n self.likelihood = likelihood\n #assert self.X.shape[0] == self.likelihood.Y.shape[0]\n #self.N, self.D = self.likelihood.Y.shape\n assert self.X.shape[0] == self.likelihood.data.shape[0]\n self.N, self.D = self.likelihood.data.shape\n\n model.__init__(self)\n\n def dL_dZ(self):\n \"\"\"\n TODO: one day we might like to learn Z by gradient methods?\n \"\"\"\n return np.zeros_like(self.Z)\n\n def _set_params(self,p):\n self.kern._set_params_transformed(p[:self.kern.Nparam])\n #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas\n self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas\n\n\n self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)\n self.K += self.likelihood.covariance_matrix\n\n self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)\n\n #the gradient of the likelihood wrt the covariance matrix\n if self.likelihood.YYT is None:\n alpha = np.dot(self.Ki,self.likelihood.Y)\n self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)\n else:\n tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)\n self.dL_dK = 0.5*(tmp - self.D*self.Ki)\n\n def _get_params(self):\n return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))\n\n def _get_param_names(self):\n return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()\n\n def update_likelihood_approximation(self):\n \"\"\"\n Approximates a non-gaussian likelihood using Expectation Propagation\n\n For a Gaussian (or direct: TODO) likelihood, no iteration is required:\n this function does nothing\n \"\"\"\n self.likelihood.fit_full(self.kern.K(self.X))\n 
self._set_params(self._get_params()) # update the GP\n\n def _model_fit_term(self):\n \"\"\"\n Computes the model fit using YYT if it's available\n \"\"\"\n if self.likelihood.YYT is None:\n return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))\n else:\n return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))\n\n def log_likelihood(self):\n \"\"\"\n The log marginal likelihood of the GP.\n\n For an EP model, can be written as the log likelihood of a regression\n model for a new variable Y* = v_tilde/tau_tilde, with a covariance\n matrix K* = K + diag(1./tau_tilde) plus a normalization term.\n \"\"\"\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z\n\n\n def _log_likelihood_gradients(self):\n \"\"\"\n The gradient of all parameters.\n\n For the kernel parameters, use the chain rule via dL_dK\n\n For the likelihood parameters, pass in alpha = K^-1 y\n \"\"\"\n return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))\n\n def _raw_predict(self,_Xnew,slices=None, full_cov=False):\n \"\"\"\n Internal helper function for making predictions, does not account\n for normalization or likelihood\n \"\"\"\n Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)\n mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)\n KiKx = np.dot(self.Ki,Kx)\n if full_cov:\n Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)\n var = Kxx - np.dot(KiKx.T,Kx)\n else:\n Kxx = self.kern.Kdiag(_Xnew, slices=slices)\n var = Kxx - np.sum(np.multiply(KiKx,Kx),0)\n var = var[:,None]\n return mu, var\n\n\n def predict(self,Xnew, slices=None, full_cov=False):\n \"\"\"\n Predict the function(s) at the new point(s) Xnew.\n\n Arguments\n ---------\n :param Xnew: The points at which to make a prediction\n :type Xnew: np.ndarray, Nnew x self.Q\n :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)\n :type slices: (None, list of slice objects, list of ints)\n :param full_cov: whether to return the folll covariance matrix, or just the diagonal\n :type full_cov: bool\n :rtype: posterior mean, a Numpy array, Nnew x self.D\n :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise\n :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D\n\n .. Note:: \"slices\" specifies how the the points X_new co-vary wich the training points.\n\n - If None, the new points covary throigh every kernel part (default)\n - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part\n - If a list of booleans, specifying which kernel parts are active\n\n If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. 
If self.D == 1, the return shape is Nnew x Nnew.\n This is to allow for different normalizations of the output dimensions.\n\n \"\"\"\n #normalize X values\n Xnew = (Xnew.copy() - self._Xmean) / self._Xstd\n mu, var = self._raw_predict(Xnew, slices, full_cov)\n\n #now push through likelihood TODO\n mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)\n\n return mean, var, _025pm, _975pm\n\n\n def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):\n \"\"\"\n Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian\n\n :param samples: the number of a posteriori samples to plot\n :param which_data: which if the training data to plot (default all)\n :type which_data: 'all' or a slice object to slice self.X, self.Y\n :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits\n :param which_functions: which of the kernel functions to plot (additively)\n :type which_functions: list of bools\n :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D\n\n Plot the posterior of the GP.\n - In one dimension, the function is plotted with a shaded region identifying two standard deviations.\n - In two dimsensions, a contour-plot shows the mean predicted function\n - In higher dimensions, we've no implemented this yet !TODO!\n\n Can plot only part of the data and part of the posterior functions using which_data and which_functions\n Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood\n \"\"\"\n if which_functions=='all':\n which_functions = [True]*self.kern.Nparts\n if which_data=='all':\n which_data = slice(None)\n\n if self.X.shape[1] == 1:\n Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)\n if samples == 0:\n m,v = self._raw_predict(Xnew, slices=which_functions)\n gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v))\n pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)\n else:\n m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True)\n Ysim = np.random.multivariate_normal(m.flatten(),v,samples)\n gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None])\n for i in range(samples):\n pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25)\n pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)\n pb.xlim(xmin,xmax)\n ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None])))\n ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin)\n pb.ylim(ymin,ymax)\n if hasattr(self,'Z'):\n pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12)\n\n elif self.X.shape[1] == 2:\n resolution = resolution or 50\n Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution)\n m,v = self._raw_predict(Xnew, slices=which_functions)\n m = m.reshape(resolution,resolution).T\n pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet)\n pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max())\n pb.xlim(xmin[0],xmax[0])\n pb.ylim(xmin[1],xmax[1])\n else:\n raise NotImplementedError, \"Cannot define a frame with more than two input dimensions\"\n\n def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20):\n \"\"\"\n TODO: Docstrings!\n :param 
levels: for 2D plotting, the number of contour levels to use\n\n \"\"\"\n # TODO include samples\n if which_functions=='all':\n which_functions = [True]*self.kern.Nparts\n if which_data=='all':\n which_data = slice(None)\n\n if self.X.shape[1] == 1:\n\n Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now\n\n Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)\n m, var, lower, upper = self.predict(Xnew, slices=which_functions)\n gpplot(Xnew,m, lower, upper)\n pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5)\n ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper))\n ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin)\n pb.xlim(xmin,xmax)\n pb.ylim(ymin,ymax)\n if hasattr(self,'Z'):\n Zu = self.Z*self._Xstd + self._Xmean\n pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12)\n if self.has_uncertain_inputs:\n pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))\n\n elif self.X.shape[1]==2: #FIXME\n resolution = resolution or 50\n Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution)\n x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution)\n m, var, lower, upper = self.predict(Xnew, slices=which_functions)\n m = m.reshape(resolution,resolution).T\n pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet)\n Yf = self.likelihood.Y.flatten()\n pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.)\n pb.xlim(xmin[0],xmax[0])\n pb.ylim(xmin[1],xmax[1])\n if hasattr(self,'Z'):\n pb.plot(self.Z[:,0],self.Z[:,1],'wo')\n\n else:\n raise NotImplementedError, \"Cannot define a frame with more than two input dimensions\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 2.2.16 on 2020-11-04 12:48
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0002_auto_20201103_1648'),
]
operations = [
migrations.AddField(
model_name='course',
name='course_video',
field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),
),
migrations.AlterField(
model_name='course',
name='brief',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),
),
]
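
# Apply with the standard Django CLI: python manage.py migrate course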
|
normal
|
{
"blob_id": "afacc2c54584c070963c4cb3cabbae64bb0e3159",
"index": 1858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-4": "import ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-5": "# Generated by Django 2.2.16 on 2020-11-04 12:48\n\nimport ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('course', '0002_auto_20201103_1648'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='course_video',\n field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),\n ),\n migrations.AlterField(\n model_name='course',\n name='brief',\n field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
## Filename: name.py
# Author: Marcelo Feitoza Parisi
#
# Description: Report the objects
# on the bucket sorted by name.
#
# ###########################
# # DISCLAIMER - IMPORTANT! #
# ###########################
#
# Stuff found here was built as a
# Proof-Of-Concept or Study material
# and should not be considered
# production ready!
#
# USE WITH CARE!
##
from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
# Google Cloud Storage Client
client = storage.Client()
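    # user_project sets the billing project, which also enables access to
    # Requester Pays buckets.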
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
# Will hold our local list of objects
blob_list = []
try:
for blob in blobs:
# For each object we'll save name, owner, class, size and date
this_blob = { 'name': blob.name,
'owner': blob.owner,
'class': blob.storage_class,
'size' : blob.size,
'date' : str(blob.updated).split('.')[0].split('+')[0]
}
# Append object to our list
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
# Sort our object list by name using our reverse_opt
    sorted_list = sorted(blob_list, key=lambda k: k['name'], reverse=reverse_opt)
# Generating our PrettyTable
report_table = PrettyTable()
report_table.field_names = ["NAME", "OWNER", "CLASS", "SIZE", "DATE"]
report_table.align["NAME"] = "l"
report_table.align["SIZE"] = "r"
report_table.align["DATE"] = "r"
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
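
# A minimal invocation sketch (bucket/project IDs are placeholders):
# if __name__ == '__main__':
#     exec('my-bucket', 'my-project', reverse_opt=False)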
|
normal
|
{
"blob_id": "562b2c3567e42699cfd0804a5780af7ede142e13",
"index": 1056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"step-3": "from lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"step-4": "## Filename: name.py\n # Author: Marcelo Feitoza Parisi\n # \n # Description: Report the objects\n # on the bucket sorted by name.\n # \n # ###########################\n # # DISCLAIMER - IMPORTANT! #\n # ###########################\n # \n # Stuff found here was built as a\n # Proof-Of-Concept or Study material\n # and should not be considered\n # production ready!\n # \n # USE WITH CARE!\n##\nfrom lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\ndef exec(bucket_id, project_id, reverse_opt):\n\n # Google Cloud Storage Client\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n\n # Will hold our local list of objects\n blob_list = []\n\n try: \n for blob in blobs:\n # For each object we'll save name, owner, class, size and date\n this_blob = { 'name': blob.name,\n 'owner': blob.owner,\n 'class': blob.storage_class,\n 'size' : blob.size,\n 'date' : str(blob.updated).split('.')[0].split('+')[0]\n }\n # Append object to our list\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n\n # Sort our object list by name using our reverse_opt\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=reverse_opt)\n\n # Generating our PrettyTable\n report_table = PrettyTable()\n report_table.field_names = [\"NAME\", \"OWNER\", \"CLASS\", \"SIZE\", \"DATE\"]\n report_table.align[\"NAME\"] = \"l\"\n report_table.align[\"SIZE\"] = \"r\"\n report_table.align[\"DATE\"] = \"r\"\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])\n\n print(report_table)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
# Create your models here.
class Position(models.Model):
title = models.CharField(max_length=50)
def __str__(self):
return self.title
class Employee(models.Model):
nom = models.CharField(max_length=100)
prenom = models.CharField(max_length=100)
age= models.CharField(max_length=15)
sexe= models.ForeignKey(Position,on_delete=models.CASCADE)
portable = models.CharField(max_length=100)
email = models.CharField(max_length=100)
formation = models.CharField(max_length=100000)
experiences1 = models.CharField(max_length=100000)
experiences2 = models.CharField(max_length=100000)
experiences3 = models.CharField(max_length=100000)
competences = models.CharField(max_length=100000)
divers = models.CharField(max_length=1000)
linkedin = models.CharField(max_length=1000)
CV=models.FileField(upload_to ='media/pdf')
"""
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.pdfpage import PDFPage
from io import BytesIO
import argparse
rsrcmgr = PDFResourceManager()
retstr = BytesIO()
device = TextConverter(rsrcmgr, retstr)
#with open(path, "rb") as fp: # open in 'rb' mode to read PDF bytes
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(CV, check_extractable=True):
interpreter.process_page(page)
device.close()
text = retstr.getvalue()
retstr.close()
pdf_text = text.decode("utf-8")
#########"""
def __str__(self):
return (str(self.CV)+"*"+" "+str(self.formation)+" "+str(self.competences)+" "+str(self.experiences1)+" "+str(self.experiences2)+" "+str(self.experiences3))
|
normal
|
{
"blob_id": "5ab20c1cd2dc0d0ad881ee52008d00c2317084f9",
"index": 5308,
"step-1": "<mask token>\n\n\nclass Position(models.Model):\n <mask token>\n <mask token>\n\n\nclass Employee(models.Model):\n nom = models.CharField(max_length=100)\n prenom = models.CharField(max_length=100)\n age = models.CharField(max_length=15)\n sexe = models.ForeignKey(Position, on_delete=models.CASCADE)\n portable = models.CharField(max_length=100)\n email = models.CharField(max_length=100)\n formation = models.CharField(max_length=100000)\n experiences1 = models.CharField(max_length=100000)\n experiences2 = models.CharField(max_length=100000)\n experiences3 = models.CharField(max_length=100000)\n competences = models.CharField(max_length=100000)\n divers = models.CharField(max_length=1000)\n linkedin = models.CharField(max_length=1000)\n CV = models.FileField(upload_to='media/pdf')\n \"\"\"\n from pdfminer.pdfinterp import PDFResourceManager\n from pdfminer.pdfinterp import PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.pdfpage import PDFPage\n from io import BytesIO \n import argparse\n rsrcmgr = PDFResourceManager()\n retstr = BytesIO()\n device = TextConverter(rsrcmgr, retstr)\n #with open(path, \"rb\") as fp: # open in 'rb' mode to read PDF bytes\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(CV, check_extractable=True):\n interpreter.process_page(page)\n device.close() \n text = retstr.getvalue()\n retstr.close()\n pdf_text = text.decode(\"utf-8\") \n #########\"\"\"\n\n def __str__(self):\n return str(self.CV) + '*' + ' ' + str(self.formation) + ' ' + str(self\n .competences) + ' ' + str(self.experiences1) + ' ' + str(self.\n experiences2) + ' ' + str(self.experiences3)\n",
"step-2": "<mask token>\n\n\nclass Position(models.Model):\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass Employee(models.Model):\n nom = models.CharField(max_length=100)\n prenom = models.CharField(max_length=100)\n age = models.CharField(max_length=15)\n sexe = models.ForeignKey(Position, on_delete=models.CASCADE)\n portable = models.CharField(max_length=100)\n email = models.CharField(max_length=100)\n formation = models.CharField(max_length=100000)\n experiences1 = models.CharField(max_length=100000)\n experiences2 = models.CharField(max_length=100000)\n experiences3 = models.CharField(max_length=100000)\n competences = models.CharField(max_length=100000)\n divers = models.CharField(max_length=1000)\n linkedin = models.CharField(max_length=1000)\n CV = models.FileField(upload_to='media/pdf')\n \"\"\"\n from pdfminer.pdfinterp import PDFResourceManager\n from pdfminer.pdfinterp import PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.pdfpage import PDFPage\n from io import BytesIO \n import argparse\n rsrcmgr = PDFResourceManager()\n retstr = BytesIO()\n device = TextConverter(rsrcmgr, retstr)\n #with open(path, \"rb\") as fp: # open in 'rb' mode to read PDF bytes\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(CV, check_extractable=True):\n interpreter.process_page(page)\n device.close() \n text = retstr.getvalue()\n retstr.close()\n pdf_text = text.decode(\"utf-8\") \n #########\"\"\"\n\n def __str__(self):\n return str(self.CV) + '*' + ' ' + str(self.formation) + ' ' + str(self\n .competences) + ' ' + str(self.experiences1) + ' ' + str(self.\n experiences2) + ' ' + str(self.experiences3)\n",
"step-3": "<mask token>\n\n\nclass Position(models.Model):\n title = models.CharField(max_length=50)\n\n def __str__(self):\n return self.title\n\n\nclass Employee(models.Model):\n nom = models.CharField(max_length=100)\n prenom = models.CharField(max_length=100)\n age = models.CharField(max_length=15)\n sexe = models.ForeignKey(Position, on_delete=models.CASCADE)\n portable = models.CharField(max_length=100)\n email = models.CharField(max_length=100)\n formation = models.CharField(max_length=100000)\n experiences1 = models.CharField(max_length=100000)\n experiences2 = models.CharField(max_length=100000)\n experiences3 = models.CharField(max_length=100000)\n competences = models.CharField(max_length=100000)\n divers = models.CharField(max_length=1000)\n linkedin = models.CharField(max_length=1000)\n CV = models.FileField(upload_to='media/pdf')\n \"\"\"\n from pdfminer.pdfinterp import PDFResourceManager\n from pdfminer.pdfinterp import PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.pdfpage import PDFPage\n from io import BytesIO \n import argparse\n rsrcmgr = PDFResourceManager()\n retstr = BytesIO()\n device = TextConverter(rsrcmgr, retstr)\n #with open(path, \"rb\") as fp: # open in 'rb' mode to read PDF bytes\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(CV, check_extractable=True):\n interpreter.process_page(page)\n device.close() \n text = retstr.getvalue()\n retstr.close()\n pdf_text = text.decode(\"utf-8\") \n #########\"\"\"\n\n def __str__(self):\n return str(self.CV) + '*' + ' ' + str(self.formation) + ' ' + str(self\n .competences) + ' ' + str(self.experiences1) + ' ' + str(self.\n experiences2) + ' ' + str(self.experiences3)\n",
"step-4": "from django.db import models\n\n\nclass Position(models.Model):\n title = models.CharField(max_length=50)\n\n def __str__(self):\n return self.title\n\n\nclass Employee(models.Model):\n nom = models.CharField(max_length=100)\n prenom = models.CharField(max_length=100)\n age = models.CharField(max_length=15)\n sexe = models.ForeignKey(Position, on_delete=models.CASCADE)\n portable = models.CharField(max_length=100)\n email = models.CharField(max_length=100)\n formation = models.CharField(max_length=100000)\n experiences1 = models.CharField(max_length=100000)\n experiences2 = models.CharField(max_length=100000)\n experiences3 = models.CharField(max_length=100000)\n competences = models.CharField(max_length=100000)\n divers = models.CharField(max_length=1000)\n linkedin = models.CharField(max_length=1000)\n CV = models.FileField(upload_to='media/pdf')\n \"\"\"\n from pdfminer.pdfinterp import PDFResourceManager\n from pdfminer.pdfinterp import PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.pdfpage import PDFPage\n from io import BytesIO \n import argparse\n rsrcmgr = PDFResourceManager()\n retstr = BytesIO()\n device = TextConverter(rsrcmgr, retstr)\n #with open(path, \"rb\") as fp: # open in 'rb' mode to read PDF bytes\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(CV, check_extractable=True):\n interpreter.process_page(page)\n device.close() \n text = retstr.getvalue()\n retstr.close()\n pdf_text = text.decode(\"utf-8\") \n #########\"\"\"\n\n def __str__(self):\n return str(self.CV) + '*' + ' ' + str(self.formation) + ' ' + str(self\n .competences) + ' ' + str(self.experiences1) + ' ' + str(self.\n experiences2) + ' ' + str(self.experiences3)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Position(models.Model):\n title = models.CharField(max_length=50)\n\n def __str__(self):\n return self.title\n\nclass Employee(models.Model):\n nom = models.CharField(max_length=100)\n prenom = models.CharField(max_length=100)\n age= models.CharField(max_length=15)\n sexe= models.ForeignKey(Position,on_delete=models.CASCADE)\n portable = models.CharField(max_length=100)\n email = models.CharField(max_length=100)\n formation = models.CharField(max_length=100000)\n experiences1 = models.CharField(max_length=100000)\n experiences2 = models.CharField(max_length=100000)\n experiences3 = models.CharField(max_length=100000)\n competences = models.CharField(max_length=100000)\n divers = models.CharField(max_length=1000)\n linkedin = models.CharField(max_length=1000)\n CV=models.FileField(upload_to ='media/pdf') \n \"\"\"\n from pdfminer.pdfinterp import PDFResourceManager\n from pdfminer.pdfinterp import PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.pdfpage import PDFPage\n from io import BytesIO \n import argparse\n rsrcmgr = PDFResourceManager()\n retstr = BytesIO()\n device = TextConverter(rsrcmgr, retstr)\n #with open(path, \"rb\") as fp: # open in 'rb' mode to read PDF bytes\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(CV, check_extractable=True):\n interpreter.process_page(page)\n device.close() \n text = retstr.getvalue()\n retstr.close()\n pdf_text = text.decode(\"utf-8\") \n #########\"\"\"\n def __str__(self):\n return (str(self.CV)+\"*\"+\" \"+str(self.formation)+\" \"+str(self.competences)+\" \"+str(self.experiences1)+\" \"+str(self.experiences2)+\" \"+str(self.experiences3))",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import os
path = r'D:\python\风变编程\python基础-山顶班\fb_16'
path1 = 'test_01'
path2 = 'fb_csv-01-获取网页内容.py'
print(os.getcwd()) # 返回当前工作目录
print(os.listdir(path)) # 返回path指定的文件夹包含的文件或文件夹的名字的列表
#print(os.mkdir(path1)) # 创建文件夹
print(os.path.abspath(path)) # 返回绝对路径
print(os.path.basename(path)) # 返回文件名
print(os.path.isfile(path2)) # 判断路径是否为文件
print(os.path.isdir(path1)) # 判断路径是否为目录
|
normal
|
{
"blob_id": "c01ea897cd64b3910531babe9fce8c61b750185d",
"index": 7912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(os.getcwd())\nprint(os.listdir(path))\nprint(os.path.abspath(path))\nprint(os.path.basename(path))\nprint(os.path.isfile(path2))\nprint(os.path.isdir(path1))\n",
"step-3": "<mask token>\npath = 'D:\\\\python\\\\风变编程\\\\python基础-山顶班\\\\fb_16'\npath1 = 'test_01'\npath2 = 'fb_csv-01-获取网页内容.py'\nprint(os.getcwd())\nprint(os.listdir(path))\nprint(os.path.abspath(path))\nprint(os.path.basename(path))\nprint(os.path.isfile(path2))\nprint(os.path.isdir(path1))\n",
"step-4": "import os\npath = 'D:\\\\python\\\\风变编程\\\\python基础-山顶班\\\\fb_16'\npath1 = 'test_01'\npath2 = 'fb_csv-01-获取网页内容.py'\nprint(os.getcwd())\nprint(os.listdir(path))\nprint(os.path.abspath(path))\nprint(os.path.basename(path))\nprint(os.path.isfile(path2))\nprint(os.path.isdir(path1))\n",
"step-5": "import os\npath = r'D:\\python\\风变编程\\python基础-山顶班\\fb_16'\npath1 = 'test_01'\npath2 = 'fb_csv-01-获取网页内容.py'\nprint(os.getcwd()) # 返回当前工作目录\nprint(os.listdir(path)) # 返回path指定的文件夹包含的文件或文件夹的名字的列表\n#print(os.mkdir(path1)) # 创建文件夹\nprint(os.path.abspath(path)) # 返回绝对路径\nprint(os.path.basename(path)) # 返回文件名\nprint(os.path.isfile(path2)) # 判断路径是否为文件\nprint(os.path.isdir(path1)) # 判断路径是否为目录",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import matplotlib.pyplot as plt
w1 = [(1, 2, 7), (1, 8, 1),
(1, 7, 5), (1, 6, 3),
(1, 7, 8), (1, 5, 9),
(1, 4, 5)]
w2 = [(-1, -4, -2), (-1, 1, 1),
(-1, -1, -3), (-1, -3, 2),
(-1, -5, -3.25), (-1, -2, -4),
(-1, -7, -1)]
dataset = [(1, 2, 7), (1, 8, 1),
(1, 7, 5), (1, 6, 3),
(1, 7, 8), (1, 5, 9),
(1, 4, 5), (-1, -4, -2),
(-1, 1, 1), (-1, -1, -3),
(-1, -3, 2), (-1, -5, -3.25),
(-1, -2, -4), (-1, -7, -1)]
# Single Perceptron function
def single_sample_perceptron():
weight = [1, 1, 1]
iterations = 0
while(1):
iterations = iterations+1
ans = 0
count = 0
eta = 0.2
# print weight
for j in xrange(len(dataset)):
ans = 0
for i in xrange(3):
ans = ans+float(weight[i]*dataset[j][i])
if(ans < 0):
for i in xrange(3):
weight[i] = weight[i]+eta*dataset[j][i]
break
count += 1
if count == len(dataset):
break
print
print "Final weights: ",
print weight
print "No. of Iterations: ",
print iterations
return weight
def main():
a = single_sample_perceptron()
x1 = x2 = y1 = y2 = []
for j in range(len(w1)):
x1.append(w1[j][1])
y1.append(w1[j][2])
for j in range(len(w2)):
x2.append((-1)*w2[j][1])
y2.append((-1)*w2[j][2])
plt.plot(x1, y1, 'ro')
plt.plot(x2, y2, 'bo')
m1 = a[2]/a[1]
m2 = (-1)/(m1)
c = (-1)*a[0]/a[2]
ya = m2*100+c
yb = m2*(-100)+c
plt.plot([100, -100], [ya, yb], 'r')
plt.axis([-10, 10, -10, 10])
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "15105e22b3c1860735f282a2247ab41b138d75cf",
"index": 3452,
"step-1": "import matplotlib.pyplot as plt\n\nw1 = [(1, 2, 7), (1, 8, 1),\n (1, 7, 5), (1, 6, 3),\n (1, 7, 8), (1, 5, 9),\n (1, 4, 5)]\nw2 = [(-1, -4, -2), (-1, 1, 1),\n (-1, -1, -3), (-1, -3, 2),\n (-1, -5, -3.25), (-1, -2, -4),\n (-1, -7, -1)]\ndataset = [(1, 2, 7), (1, 8, 1),\n (1, 7, 5), (1, 6, 3),\n (1, 7, 8), (1, 5, 9),\n (1, 4, 5), (-1, -4, -2),\n (-1, 1, 1), (-1, -1, -3),\n (-1, -3, 2), (-1, -5, -3.25),\n (-1, -2, -4), (-1, -7, -1)]\n\n\n# Single Perceptron function\ndef single_sample_perceptron():\n weight = [1, 1, 1]\n iterations = 0\n while(1):\n iterations = iterations+1\n ans = 0\n count = 0\n eta = 0.2\n # print weight\n for j in xrange(len(dataset)):\n ans = 0\n for i in xrange(3):\n ans = ans+float(weight[i]*dataset[j][i])\n if(ans < 0):\n for i in xrange(3):\n weight[i] = weight[i]+eta*dataset[j][i]\n break\n count += 1\n if count == len(dataset):\n break\n print\n print \"Final weights: \",\n print weight\n print \"No. of Iterations: \",\n print iterations\n\n return weight\n\n\ndef main():\n a = single_sample_perceptron()\n x1 = x2 = y1 = y2 = []\n for j in range(len(w1)):\n x1.append(w1[j][1])\n y1.append(w1[j][2])\n for j in range(len(w2)):\n x2.append((-1)*w2[j][1])\n y2.append((-1)*w2[j][2])\n\n plt.plot(x1, y1, 'ro')\n plt.plot(x2, y2, 'bo')\n m1 = a[2]/a[1]\n m2 = (-1)/(m1)\n c = (-1)*a[0]/a[2]\n ya = m2*100+c\n yb = m2*(-100)+c\n plt.plot([100, -100], [ya, yb], 'r')\n plt.axis([-10, 10, -10, 10])\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import time
from bitfinex_trade_client import BitfinexClient,BitfinexTradeClient
KEY = "nBi8YyJZZ9ZhSOf2jEpMAoBpzKt2Shh6IoLdTjFRYvb"
SECRET = "XO6FUYbhFYqBflXYSaKMiu1hGHLhGf63xsOK0Pf7osA"
class EMA:
def __init__(self, duration):
self.value = 0
self.duration = duration
self.count = 0
self.multiplier = 2.0 / (self.duration + 1)
def update(self, px):
if(self.count < self.duration):
self.count += 1
multiplier = 2.0/(self.count + 1)
self.value = multiplier * px + (1 - multiplier) * self.value
else:
self.value = self.multiplier * px + (1 - self.multiplier) * self.value
def ready(self):
return (self.count >= self.duration * 0.05)
class Order:
def __init__(self, amount, price, side, ord_type, symbol):
self.amount = amount
self.price = price
self.side = side
self.ord_type = ord_type
self.symbol = symbol
self.traded_amount = 0
self.traded_px = 0
self.id = -1
def __str__(self):
return "Order => Amount:" + str(self.amount) + "|Price:" + str(self.price) + "|Side:" + str(self.side)
class BitfinexMMTrader:
def __init__(self, key, secret, symbol, single_order_amount, total_amount, interval, duration, threshold, stop_loss):
self.symbol = symbol
self.sym1 = symbol[:3]
self.sym2 = symbol[3:]
self.trade_client = BitfinexTradeClient(key, secret)
self.client = BitfinexClient()
self.total_amount = total_amount
self.single_order_amount = single_order_amount
self.interval = interval
self.duration = int(duration / interval)
self.threshold = threshold
self.fees_per = self.get_fees()
self.stop_loss = stop_loss
self.ema = EMA(self.duration)
self.buy_order = None
self.sell_order = None
self.buy_position = 0
self.buy_px = 0
self.sell_position = 0
self.sell_px = 0
self.last_email_time = 0
self.run = True
self.ticker = None
def get_fees(self):
account_info = self.trade_client.account_info()
return float(account_info[0]["maker_fees"])
def get_pnl(self):
pos = max(self.buy_position, self.sell_position)
if(pos == 0):
return 0
bid = float(self.ticker['bid'])
ask = float(self.ticker['ask'])
buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position * self.buy_px) / pos
sell_avg_px = ((pos - self.sell_position) * bid + self.sell_position * self.sell_px) / pos
cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0
return pos * (sell_avg_px - buy_avg_px) - cost
def buy_sell_px(self):
position_offset = self.net_position() / self.total_amount / 2.0
buy_threshold = self.threshold * (1 + position_offset)
sell_threshold = self.threshold * (1 - position_offset)
buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))
sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))
return (buy_px, sell_px)
def can_buy(self, buy_px, bid):
return (buy_px >= bid and self.net_position() < self.total_amount)
def can_sell(self, sell_px, ask):
return (sell_px <= ask and -self.net_position() < self.total_amount)
def net_position(self):
return (self.buy_position - self.sell_position)
def update_order_status(self):
try:
if(self.buy_order != None):
status = self.trade_client.status_order(self.buy_order.id)
executed_amount = float(status["executed_amount"])
executed_px = float(status["avg_execution_price"])
new_executed_amount = executed_amount - self.buy_order.traded_amount
if (new_executed_amount > 0):
print(status)
new_executed_px = (executed_amount * executed_px - self.buy_order.traded_amount * self.buy_order.traded_px) / new_executed_amount
self.buy_order.traded_amount = executed_amount
self.buy_order.traded_px = executed_px
self.buy_px = (self.buy_px * self.buy_position + new_executed_amount * new_executed_px) / (self.buy_position + new_executed_amount)
self.buy_position += new_executed_amount
if(not status["is_live"]):
self.buy_order = None
if (self.sell_order != None):
status = self.trade_client.status_order(self.sell_order.id)
executed_amount = float(status["executed_amount"])
executed_px = float(status["avg_execution_price"])
new_executed_amount = executed_amount - self.sell_order.traded_amount
if(new_executed_amount > 0):
print(status)
new_executed_px = (executed_amount * executed_px - self.sell_order.traded_amount * self.sell_order.traded_px) / new_executed_amount
self.sell_order.traded_amount = executed_amount
self.sell_order.traded_px = executed_px
self.sell_px = (self.sell_px * self.sell_position + new_executed_amount * new_executed_px) / (self.sell_position + new_executed_amount)
self.sell_position += new_executed_amount
print("PNL => " + str(self.get_pnl()))
if (not status["is_live"]):
self.sell_order = None
print("Position => BuyPos:" + str(self.buy_position) + "|BuyPx:" + str(self.buy_px) + "|SellPos:" + str(self.sell_position) + "|SellPx:" + str(self.sell_px))
except Exception as e:
print("Update Order Status Exception: " + str(e))
pass
def sqoff(self):
pnl = self.get_pnl()
if(pnl < -self.stop_loss):
position = self.net_position()
if(position > 0):
self.sell_order = Order(position, float(self.ticker['bid']), "sell", "exchange fill-or-kill", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(position), str(self.ticker['bid']), "sell", "exchange fill-or-kill", False,
self.symbol)
print(status)
if ("order_id" in status):
self.sell_order.id = status["order_id"]
else:
self.sell_order = None
return True
elif(position < 0):
self.buy_order = Order(-position, float(self.ticker['ask']), "buy", "exchange fill-or-kill", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(-position), str(self.ticker['ask']), "buy", "exchange fill-or-kill", False,
self.symbol)
print(status)
if ("order_id" in status):
self.buy_order.id = status["order_id"]
else:
self.buy_order = None
return True
return False
def email(self, message, sub, emails):
curr_time = time.time()
if (curr_time - self.last_email_time > 1800):
print(message)
def trade(self):
while (self.run):
time.sleep(self.interval)
try:
self.ticker = self.client.ticker(self.symbol)
except Exception as e:
print("Ticker Exception: " + str(e))
continue
self.update_order_status()
self.ema.update(self.ticker['mid'])
if(self.ema.ready()):
if (not self.sqoff()):
buy_px, sell_px = self.buy_sell_px()
bid = self.ticker['bid']
ask = self.ticker['ask']
buy_px = min(bid, buy_px)
sell_px = max(ask, sell_px)
print("Market => EMA: " + str(round(self.ema.value)) + "|BuyPx:" + str(buy_px) + "|SellPx:" + str(sell_px) + "|Bid:" + str(bid) + "|Ask:" + str(ask))
if(self.can_buy(buy_px, bid)):
if(self.buy_order == None):
amount = min(self.single_order_amount, self.total_amount - max(0, self.net_position()))
self.buy_order = Order(amount, buy_px, "buy", "exchange limit", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(amount), str(buy_px), "buy", "exchange limit", True, self.symbol)
print(status)
if ("order_id" in status):
self.buy_order.id = status["order_id"]
else:
self.buy_order = None
else:
if(abs(self.buy_order.price - buy_px) / buy_px * 100.0 > self.threshold / 10.0 ):
self.trade_client.delete_order(self.buy_order.id)
if (self.can_sell(sell_px, ask)):
if (self.sell_order == None):
amount = min(self.single_order_amount, self.total_amount - max(0, -self.net_position()))
self.sell_order = Order(amount, sell_px, "sell", "exchange limit", self.symbol)
print(self.sell_order)
status = self.trade_client.place_order(str(amount), str(sell_px), "sell", "exchange limit", True, self.symbol)
print(status)
if("order_id" in status):
self.sell_order.id = status["order_id"]
else:
self.sell_order = None
else:
if (abs(self.sell_order.price - sell_px) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount):
self.trade_client.delete_order(self.sell_order.id)
|
normal
|
{
"blob_id": "6abfd6c0a644356ae0bc75d62472b5c495118a8e",
"index": 4466,
"step-1": "<mask token>\n\n\nclass BitfinexMMTrader:\n <mask token>\n\n def get_fees(self):\n account_info = self.trade_client.account_info()\n return float(account_info[0]['maker_fees'])\n\n def get_pnl(self):\n pos = max(self.buy_position, self.sell_position)\n if pos == 0:\n return 0\n bid = float(self.ticker['bid'])\n ask = float(self.ticker['ask'])\n buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position *\n self.buy_px) / pos\n sell_avg_px = ((pos - self.sell_position) * bid + self.\n sell_position * self.sell_px) / pos\n cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0\n return pos * (sell_avg_px - buy_avg_px) - cost\n\n def buy_sell_px(self):\n position_offset = self.net_position() / self.total_amount / 2.0\n buy_threshold = self.threshold * (1 + position_offset)\n sell_threshold = self.threshold * (1 - position_offset)\n buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))\n sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))\n return buy_px, sell_px\n\n def can_buy(self, buy_px, bid):\n return buy_px >= bid and self.net_position() < self.total_amount\n\n def can_sell(self, sell_px, ask):\n return sell_px <= ask and -self.net_position() < self.total_amount\n\n def net_position(self):\n return self.buy_position - self.sell_position\n\n def update_order_status(self):\n try:\n if self.buy_order != None:\n status = self.trade_client.status_order(self.buy_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.buy_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .buy_order.traded_amount * self.buy_order.traded_px\n ) / new_executed_amount\n self.buy_order.traded_amount = executed_amount\n self.buy_order.traded_px = executed_px\n self.buy_px = (self.buy_px * self.buy_position + \n new_executed_amount * new_executed_px) / (self.\n buy_position + new_executed_amount)\n self.buy_position += new_executed_amount\n if not status['is_live']:\n self.buy_order = None\n if self.sell_order != None:\n status = self.trade_client.status_order(self.sell_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.sell_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .sell_order.traded_amount * self.sell_order.traded_px\n ) / new_executed_amount\n self.sell_order.traded_amount = executed_amount\n self.sell_order.traded_px = executed_px\n self.sell_px = (self.sell_px * self.sell_position + \n new_executed_amount * new_executed_px) / (self.\n sell_position + new_executed_amount)\n self.sell_position += new_executed_amount\n print('PNL => ' + str(self.get_pnl()))\n if not status['is_live']:\n self.sell_order = None\n print('Position => BuyPos:' + str(self.buy_position) +\n '|BuyPx:' + str(self.buy_px) + '|SellPos:' + str(self.\n sell_position) + '|SellPx:' + str(self.sell_px))\n except Exception as e:\n print('Update Order Status Exception: ' + str(e))\n pass\n\n def sqoff(self):\n pnl = self.get_pnl()\n if pnl < -self.stop_loss:\n position = self.net_position()\n if position > 0:\n self.sell_order = Order(position, float(self.ticker['bid']),\n 'sell', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(position), str(\n self.ticker['bid']), 
'sell', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n return True\n elif position < 0:\n self.buy_order = Order(-position, float(self.ticker['ask']),\n 'buy', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(-position), str(\n self.ticker['ask']), 'buy', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n return True\n return False\n\n def email(self, message, sub, emails):\n curr_time = time.time()\n if curr_time - self.last_email_time > 1800:\n print(message)\n\n def trade(self):\n while self.run:\n time.sleep(self.interval)\n try:\n self.ticker = self.client.ticker(self.symbol)\n except Exception as e:\n print('Ticker Exception: ' + str(e))\n continue\n self.update_order_status()\n self.ema.update(self.ticker['mid'])\n if self.ema.ready():\n if not self.sqoff():\n buy_px, sell_px = self.buy_sell_px()\n bid = self.ticker['bid']\n ask = self.ticker['ask']\n buy_px = min(bid, buy_px)\n sell_px = max(ask, sell_px)\n print('Market => EMA: ' + str(round(self.ema.value)) +\n '|BuyPx:' + str(buy_px) + '|SellPx:' + str(sell_px) +\n '|Bid:' + str(bid) + '|Ask:' + str(ask))\n if self.can_buy(buy_px, bid):\n if self.buy_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, self.net_position()))\n self.buy_order = Order(amount, buy_px, 'buy',\n 'exchange limit', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(\n amount), str(buy_px), 'buy',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n elif abs(self.buy_order.price - buy_px\n ) / buy_px * 100.0 > self.threshold / 10.0:\n self.trade_client.delete_order(self.buy_order.id)\n if self.can_sell(sell_px, ask):\n if self.sell_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, -self.net_position()))\n self.sell_order = Order(amount, sell_px, 'sell',\n 'exchange limit', self.symbol)\n print(self.sell_order)\n status = self.trade_client.place_order(str(\n amount), str(sell_px), 'sell',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n elif abs(self.sell_order.price - sell_px\n ) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount:\n self.trade_client.delete_order(self.sell_order.id)\n",
"step-2": "<mask token>\n\n\nclass EMA:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Order:\n\n def __init__(self, amount, price, side, ord_type, symbol):\n self.amount = amount\n self.price = price\n self.side = side\n self.ord_type = ord_type\n self.symbol = symbol\n self.traded_amount = 0\n self.traded_px = 0\n self.id = -1\n\n def __str__(self):\n return 'Order => Amount:' + str(self.amount) + '|Price:' + str(self\n .price) + '|Side:' + str(self.side)\n\n\nclass BitfinexMMTrader:\n\n def __init__(self, key, secret, symbol, single_order_amount,\n total_amount, interval, duration, threshold, stop_loss):\n self.symbol = symbol\n self.sym1 = symbol[:3]\n self.sym2 = symbol[3:]\n self.trade_client = BitfinexTradeClient(key, secret)\n self.client = BitfinexClient()\n self.total_amount = total_amount\n self.single_order_amount = single_order_amount\n self.interval = interval\n self.duration = int(duration / interval)\n self.threshold = threshold\n self.fees_per = self.get_fees()\n self.stop_loss = stop_loss\n self.ema = EMA(self.duration)\n self.buy_order = None\n self.sell_order = None\n self.buy_position = 0\n self.buy_px = 0\n self.sell_position = 0\n self.sell_px = 0\n self.last_email_time = 0\n self.run = True\n self.ticker = None\n\n def get_fees(self):\n account_info = self.trade_client.account_info()\n return float(account_info[0]['maker_fees'])\n\n def get_pnl(self):\n pos = max(self.buy_position, self.sell_position)\n if pos == 0:\n return 0\n bid = float(self.ticker['bid'])\n ask = float(self.ticker['ask'])\n buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position *\n self.buy_px) / pos\n sell_avg_px = ((pos - self.sell_position) * bid + self.\n sell_position * self.sell_px) / pos\n cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0\n return pos * (sell_avg_px - buy_avg_px) - cost\n\n def buy_sell_px(self):\n position_offset = self.net_position() / self.total_amount / 2.0\n buy_threshold = self.threshold * (1 + position_offset)\n sell_threshold = self.threshold * (1 - position_offset)\n buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))\n sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))\n return buy_px, sell_px\n\n def can_buy(self, buy_px, bid):\n return buy_px >= bid and self.net_position() < self.total_amount\n\n def can_sell(self, sell_px, ask):\n return sell_px <= ask and -self.net_position() < self.total_amount\n\n def net_position(self):\n return self.buy_position - self.sell_position\n\n def update_order_status(self):\n try:\n if self.buy_order != None:\n status = self.trade_client.status_order(self.buy_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.buy_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .buy_order.traded_amount * self.buy_order.traded_px\n ) / new_executed_amount\n self.buy_order.traded_amount = executed_amount\n self.buy_order.traded_px = executed_px\n self.buy_px = (self.buy_px * self.buy_position + \n new_executed_amount * new_executed_px) / (self.\n buy_position + new_executed_amount)\n self.buy_position += new_executed_amount\n if not status['is_live']:\n self.buy_order = None\n if self.sell_order != None:\n status = self.trade_client.status_order(self.sell_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = 
(executed_amount - self.sell_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .sell_order.traded_amount * self.sell_order.traded_px\n ) / new_executed_amount\n self.sell_order.traded_amount = executed_amount\n self.sell_order.traded_px = executed_px\n self.sell_px = (self.sell_px * self.sell_position + \n new_executed_amount * new_executed_px) / (self.\n sell_position + new_executed_amount)\n self.sell_position += new_executed_amount\n print('PNL => ' + str(self.get_pnl()))\n if not status['is_live']:\n self.sell_order = None\n print('Position => BuyPos:' + str(self.buy_position) +\n '|BuyPx:' + str(self.buy_px) + '|SellPos:' + str(self.\n sell_position) + '|SellPx:' + str(self.sell_px))\n except Exception as e:\n print('Update Order Status Exception: ' + str(e))\n pass\n\n def sqoff(self):\n pnl = self.get_pnl()\n if pnl < -self.stop_loss:\n position = self.net_position()\n if position > 0:\n self.sell_order = Order(position, float(self.ticker['bid']),\n 'sell', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(position), str(\n self.ticker['bid']), 'sell', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n return True\n elif position < 0:\n self.buy_order = Order(-position, float(self.ticker['ask']),\n 'buy', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(-position), str(\n self.ticker['ask']), 'buy', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n return True\n return False\n\n def email(self, message, sub, emails):\n curr_time = time.time()\n if curr_time - self.last_email_time > 1800:\n print(message)\n\n def trade(self):\n while self.run:\n time.sleep(self.interval)\n try:\n self.ticker = self.client.ticker(self.symbol)\n except Exception as e:\n print('Ticker Exception: ' + str(e))\n continue\n self.update_order_status()\n self.ema.update(self.ticker['mid'])\n if self.ema.ready():\n if not self.sqoff():\n buy_px, sell_px = self.buy_sell_px()\n bid = self.ticker['bid']\n ask = self.ticker['ask']\n buy_px = min(bid, buy_px)\n sell_px = max(ask, sell_px)\n print('Market => EMA: ' + str(round(self.ema.value)) +\n '|BuyPx:' + str(buy_px) + '|SellPx:' + str(sell_px) +\n '|Bid:' + str(bid) + '|Ask:' + str(ask))\n if self.can_buy(buy_px, bid):\n if self.buy_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, self.net_position()))\n self.buy_order = Order(amount, buy_px, 'buy',\n 'exchange limit', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(\n amount), str(buy_px), 'buy',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n elif abs(self.buy_order.price - buy_px\n ) / buy_px * 100.0 > self.threshold / 10.0:\n self.trade_client.delete_order(self.buy_order.id)\n if self.can_sell(sell_px, ask):\n if self.sell_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, -self.net_position()))\n self.sell_order = Order(amount, sell_px, 'sell',\n 'exchange limit', self.symbol)\n print(self.sell_order)\n status = self.trade_client.place_order(str(\n amount), str(sell_px), 
'sell',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n elif abs(self.sell_order.price - sell_px\n ) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount:\n self.trade_client.delete_order(self.sell_order.id)\n",
"step-3": "<mask token>\nKEY = 'nBi8YyJZZ9ZhSOf2jEpMAoBpzKt2Shh6IoLdTjFRYvb'\nSECRET = 'XO6FUYbhFYqBflXYSaKMiu1hGHLhGf63xsOK0Pf7osA'\n\n\nclass EMA:\n\n def __init__(self, duration):\n self.value = 0\n self.duration = duration\n self.count = 0\n self.multiplier = 2.0 / (self.duration + 1)\n\n def update(self, px):\n if self.count < self.duration:\n self.count += 1\n multiplier = 2.0 / (self.count + 1)\n self.value = multiplier * px + (1 - multiplier) * self.value\n else:\n self.value = self.multiplier * px + (1 - self.multiplier\n ) * self.value\n\n def ready(self):\n return self.count >= self.duration * 0.05\n\n\nclass Order:\n\n def __init__(self, amount, price, side, ord_type, symbol):\n self.amount = amount\n self.price = price\n self.side = side\n self.ord_type = ord_type\n self.symbol = symbol\n self.traded_amount = 0\n self.traded_px = 0\n self.id = -1\n\n def __str__(self):\n return 'Order => Amount:' + str(self.amount) + '|Price:' + str(self\n .price) + '|Side:' + str(self.side)\n\n\nclass BitfinexMMTrader:\n\n def __init__(self, key, secret, symbol, single_order_amount,\n total_amount, interval, duration, threshold, stop_loss):\n self.symbol = symbol\n self.sym1 = symbol[:3]\n self.sym2 = symbol[3:]\n self.trade_client = BitfinexTradeClient(key, secret)\n self.client = BitfinexClient()\n self.total_amount = total_amount\n self.single_order_amount = single_order_amount\n self.interval = interval\n self.duration = int(duration / interval)\n self.threshold = threshold\n self.fees_per = self.get_fees()\n self.stop_loss = stop_loss\n self.ema = EMA(self.duration)\n self.buy_order = None\n self.sell_order = None\n self.buy_position = 0\n self.buy_px = 0\n self.sell_position = 0\n self.sell_px = 0\n self.last_email_time = 0\n self.run = True\n self.ticker = None\n\n def get_fees(self):\n account_info = self.trade_client.account_info()\n return float(account_info[0]['maker_fees'])\n\n def get_pnl(self):\n pos = max(self.buy_position, self.sell_position)\n if pos == 0:\n return 0\n bid = float(self.ticker['bid'])\n ask = float(self.ticker['ask'])\n buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position *\n self.buy_px) / pos\n sell_avg_px = ((pos - self.sell_position) * bid + self.\n sell_position * self.sell_px) / pos\n cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0\n return pos * (sell_avg_px - buy_avg_px) - cost\n\n def buy_sell_px(self):\n position_offset = self.net_position() / self.total_amount / 2.0\n buy_threshold = self.threshold * (1 + position_offset)\n sell_threshold = self.threshold * (1 - position_offset)\n buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))\n sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))\n return buy_px, sell_px\n\n def can_buy(self, buy_px, bid):\n return buy_px >= bid and self.net_position() < self.total_amount\n\n def can_sell(self, sell_px, ask):\n return sell_px <= ask and -self.net_position() < self.total_amount\n\n def net_position(self):\n return self.buy_position - self.sell_position\n\n def update_order_status(self):\n try:\n if self.buy_order != None:\n status = self.trade_client.status_order(self.buy_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.buy_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .buy_order.traded_amount * self.buy_order.traded_px\n ) / new_executed_amount\n 
self.buy_order.traded_amount = executed_amount\n self.buy_order.traded_px = executed_px\n self.buy_px = (self.buy_px * self.buy_position + \n new_executed_amount * new_executed_px) / (self.\n buy_position + new_executed_amount)\n self.buy_position += new_executed_amount\n if not status['is_live']:\n self.buy_order = None\n if self.sell_order != None:\n status = self.trade_client.status_order(self.sell_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.sell_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .sell_order.traded_amount * self.sell_order.traded_px\n ) / new_executed_amount\n self.sell_order.traded_amount = executed_amount\n self.sell_order.traded_px = executed_px\n self.sell_px = (self.sell_px * self.sell_position + \n new_executed_amount * new_executed_px) / (self.\n sell_position + new_executed_amount)\n self.sell_position += new_executed_amount\n print('PNL => ' + str(self.get_pnl()))\n if not status['is_live']:\n self.sell_order = None\n print('Position => BuyPos:' + str(self.buy_position) +\n '|BuyPx:' + str(self.buy_px) + '|SellPos:' + str(self.\n sell_position) + '|SellPx:' + str(self.sell_px))\n except Exception as e:\n print('Update Order Status Exception: ' + str(e))\n pass\n\n def sqoff(self):\n pnl = self.get_pnl()\n if pnl < -self.stop_loss:\n position = self.net_position()\n if position > 0:\n self.sell_order = Order(position, float(self.ticker['bid']),\n 'sell', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(position), str(\n self.ticker['bid']), 'sell', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n return True\n elif position < 0:\n self.buy_order = Order(-position, float(self.ticker['ask']),\n 'buy', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(-position), str(\n self.ticker['ask']), 'buy', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n return True\n return False\n\n def email(self, message, sub, emails):\n curr_time = time.time()\n if curr_time - self.last_email_time > 1800:\n print(message)\n\n def trade(self):\n while self.run:\n time.sleep(self.interval)\n try:\n self.ticker = self.client.ticker(self.symbol)\n except Exception as e:\n print('Ticker Exception: ' + str(e))\n continue\n self.update_order_status()\n self.ema.update(self.ticker['mid'])\n if self.ema.ready():\n if not self.sqoff():\n buy_px, sell_px = self.buy_sell_px()\n bid = self.ticker['bid']\n ask = self.ticker['ask']\n buy_px = min(bid, buy_px)\n sell_px = max(ask, sell_px)\n print('Market => EMA: ' + str(round(self.ema.value)) +\n '|BuyPx:' + str(buy_px) + '|SellPx:' + str(sell_px) +\n '|Bid:' + str(bid) + '|Ask:' + str(ask))\n if self.can_buy(buy_px, bid):\n if self.buy_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, self.net_position()))\n self.buy_order = Order(amount, buy_px, 'buy',\n 'exchange limit', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(\n amount), str(buy_px), 'buy',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n 
self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n elif abs(self.buy_order.price - buy_px\n ) / buy_px * 100.0 > self.threshold / 10.0:\n self.trade_client.delete_order(self.buy_order.id)\n if self.can_sell(sell_px, ask):\n if self.sell_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, -self.net_position()))\n self.sell_order = Order(amount, sell_px, 'sell',\n 'exchange limit', self.symbol)\n print(self.sell_order)\n status = self.trade_client.place_order(str(\n amount), str(sell_px), 'sell',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n elif abs(self.sell_order.price - sell_px\n ) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount:\n self.trade_client.delete_order(self.sell_order.id)\n",
"step-4": "import time\nfrom bitfinex_trade_client import BitfinexClient, BitfinexTradeClient\nKEY = 'nBi8YyJZZ9ZhSOf2jEpMAoBpzKt2Shh6IoLdTjFRYvb'\nSECRET = 'XO6FUYbhFYqBflXYSaKMiu1hGHLhGf63xsOK0Pf7osA'\n\n\nclass EMA:\n\n def __init__(self, duration):\n self.value = 0\n self.duration = duration\n self.count = 0\n self.multiplier = 2.0 / (self.duration + 1)\n\n def update(self, px):\n if self.count < self.duration:\n self.count += 1\n multiplier = 2.0 / (self.count + 1)\n self.value = multiplier * px + (1 - multiplier) * self.value\n else:\n self.value = self.multiplier * px + (1 - self.multiplier\n ) * self.value\n\n def ready(self):\n return self.count >= self.duration * 0.05\n\n\nclass Order:\n\n def __init__(self, amount, price, side, ord_type, symbol):\n self.amount = amount\n self.price = price\n self.side = side\n self.ord_type = ord_type\n self.symbol = symbol\n self.traded_amount = 0\n self.traded_px = 0\n self.id = -1\n\n def __str__(self):\n return 'Order => Amount:' + str(self.amount) + '|Price:' + str(self\n .price) + '|Side:' + str(self.side)\n\n\nclass BitfinexMMTrader:\n\n def __init__(self, key, secret, symbol, single_order_amount,\n total_amount, interval, duration, threshold, stop_loss):\n self.symbol = symbol\n self.sym1 = symbol[:3]\n self.sym2 = symbol[3:]\n self.trade_client = BitfinexTradeClient(key, secret)\n self.client = BitfinexClient()\n self.total_amount = total_amount\n self.single_order_amount = single_order_amount\n self.interval = interval\n self.duration = int(duration / interval)\n self.threshold = threshold\n self.fees_per = self.get_fees()\n self.stop_loss = stop_loss\n self.ema = EMA(self.duration)\n self.buy_order = None\n self.sell_order = None\n self.buy_position = 0\n self.buy_px = 0\n self.sell_position = 0\n self.sell_px = 0\n self.last_email_time = 0\n self.run = True\n self.ticker = None\n\n def get_fees(self):\n account_info = self.trade_client.account_info()\n return float(account_info[0]['maker_fees'])\n\n def get_pnl(self):\n pos = max(self.buy_position, self.sell_position)\n if pos == 0:\n return 0\n bid = float(self.ticker['bid'])\n ask = float(self.ticker['ask'])\n buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position *\n self.buy_px) / pos\n sell_avg_px = ((pos - self.sell_position) * bid + self.\n sell_position * self.sell_px) / pos\n cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0\n return pos * (sell_avg_px - buy_avg_px) - cost\n\n def buy_sell_px(self):\n position_offset = self.net_position() / self.total_amount / 2.0\n buy_threshold = self.threshold * (1 + position_offset)\n sell_threshold = self.threshold * (1 - position_offset)\n buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))\n sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))\n return buy_px, sell_px\n\n def can_buy(self, buy_px, bid):\n return buy_px >= bid and self.net_position() < self.total_amount\n\n def can_sell(self, sell_px, ask):\n return sell_px <= ask and -self.net_position() < self.total_amount\n\n def net_position(self):\n return self.buy_position - self.sell_position\n\n def update_order_status(self):\n try:\n if self.buy_order != None:\n status = self.trade_client.status_order(self.buy_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.buy_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .buy_order.traded_amount * 
self.buy_order.traded_px\n ) / new_executed_amount\n self.buy_order.traded_amount = executed_amount\n self.buy_order.traded_px = executed_px\n self.buy_px = (self.buy_px * self.buy_position + \n new_executed_amount * new_executed_px) / (self.\n buy_position + new_executed_amount)\n self.buy_position += new_executed_amount\n if not status['is_live']:\n self.buy_order = None\n if self.sell_order != None:\n status = self.trade_client.status_order(self.sell_order.id)\n executed_amount = float(status['executed_amount'])\n executed_px = float(status['avg_execution_price'])\n new_executed_amount = (executed_amount - self.sell_order.\n traded_amount)\n if new_executed_amount > 0:\n print(status)\n new_executed_px = (executed_amount * executed_px - self\n .sell_order.traded_amount * self.sell_order.traded_px\n ) / new_executed_amount\n self.sell_order.traded_amount = executed_amount\n self.sell_order.traded_px = executed_px\n self.sell_px = (self.sell_px * self.sell_position + \n new_executed_amount * new_executed_px) / (self.\n sell_position + new_executed_amount)\n self.sell_position += new_executed_amount\n print('PNL => ' + str(self.get_pnl()))\n if not status['is_live']:\n self.sell_order = None\n print('Position => BuyPos:' + str(self.buy_position) +\n '|BuyPx:' + str(self.buy_px) + '|SellPos:' + str(self.\n sell_position) + '|SellPx:' + str(self.sell_px))\n except Exception as e:\n print('Update Order Status Exception: ' + str(e))\n pass\n\n def sqoff(self):\n pnl = self.get_pnl()\n if pnl < -self.stop_loss:\n position = self.net_position()\n if position > 0:\n self.sell_order = Order(position, float(self.ticker['bid']),\n 'sell', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(position), str(\n self.ticker['bid']), 'sell', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n return True\n elif position < 0:\n self.buy_order = Order(-position, float(self.ticker['ask']),\n 'buy', 'exchange fill-or-kill', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(-position), str(\n self.ticker['ask']), 'buy', 'exchange fill-or-kill', \n False, self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n return True\n return False\n\n def email(self, message, sub, emails):\n curr_time = time.time()\n if curr_time - self.last_email_time > 1800:\n print(message)\n\n def trade(self):\n while self.run:\n time.sleep(self.interval)\n try:\n self.ticker = self.client.ticker(self.symbol)\n except Exception as e:\n print('Ticker Exception: ' + str(e))\n continue\n self.update_order_status()\n self.ema.update(self.ticker['mid'])\n if self.ema.ready():\n if not self.sqoff():\n buy_px, sell_px = self.buy_sell_px()\n bid = self.ticker['bid']\n ask = self.ticker['ask']\n buy_px = min(bid, buy_px)\n sell_px = max(ask, sell_px)\n print('Market => EMA: ' + str(round(self.ema.value)) +\n '|BuyPx:' + str(buy_px) + '|SellPx:' + str(sell_px) +\n '|Bid:' + str(bid) + '|Ask:' + str(ask))\n if self.can_buy(buy_px, bid):\n if self.buy_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, self.net_position()))\n self.buy_order = Order(amount, buy_px, 'buy',\n 'exchange limit', self.symbol)\n print(self.buy_order)\n status = self.trade_client.place_order(str(\n amount), str(buy_px), 'buy',\n 'exchange limit', True, 
self.symbol)\n print(status)\n if 'order_id' in status:\n self.buy_order.id = status['order_id']\n else:\n self.buy_order = None\n elif abs(self.buy_order.price - buy_px\n ) / buy_px * 100.0 > self.threshold / 10.0:\n self.trade_client.delete_order(self.buy_order.id)\n if self.can_sell(sell_px, ask):\n if self.sell_order == None:\n amount = min(self.single_order_amount, self.\n total_amount - max(0, -self.net_position()))\n self.sell_order = Order(amount, sell_px, 'sell',\n 'exchange limit', self.symbol)\n print(self.sell_order)\n status = self.trade_client.place_order(str(\n amount), str(sell_px), 'sell',\n 'exchange limit', True, self.symbol)\n print(status)\n if 'order_id' in status:\n self.sell_order.id = status['order_id']\n else:\n self.sell_order = None\n elif abs(self.sell_order.price - sell_px\n ) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount:\n self.trade_client.delete_order(self.sell_order.id)\n",
"step-5": "import time\n\nfrom bitfinex_trade_client import BitfinexClient,BitfinexTradeClient\n\nKEY = \"nBi8YyJZZ9ZhSOf2jEpMAoBpzKt2Shh6IoLdTjFRYvb\"\nSECRET = \"XO6FUYbhFYqBflXYSaKMiu1hGHLhGf63xsOK0Pf7osA\"\n\nclass EMA:\n def __init__(self, duration):\n self.value = 0\n\n self.duration = duration\n self.count = 0\n self.multiplier = 2.0 / (self.duration + 1)\n\n def update(self, px):\n if(self.count < self.duration):\n self.count += 1\n multiplier = 2.0/(self.count + 1)\n self.value = multiplier * px + (1 - multiplier) * self.value\n else:\n self.value = self.multiplier * px + (1 - self.multiplier) * self.value\n\n def ready(self):\n return (self.count >= self.duration * 0.05)\n\nclass Order:\n def __init__(self, amount, price, side, ord_type, symbol):\n self.amount = amount\n self.price = price\n self.side = side\n self.ord_type = ord_type\n self.symbol = symbol\n self.traded_amount = 0\n self.traded_px = 0\n self.id = -1\n\n def __str__(self):\n return \"Order => Amount:\" + str(self.amount) + \"|Price:\" + str(self.price) + \"|Side:\" + str(self.side)\n\nclass BitfinexMMTrader:\n def __init__(self, key, secret, symbol, single_order_amount, total_amount, interval, duration, threshold, stop_loss):\n self.symbol = symbol\n\n self.sym1 = symbol[:3]\n self.sym2 = symbol[3:]\n\n\n self.trade_client = BitfinexTradeClient(key, secret)\n self.client = BitfinexClient()\n\n self.total_amount = total_amount\n self.single_order_amount = single_order_amount\n self.interval = interval\n self.duration = int(duration / interval)\n self.threshold = threshold\n self.fees_per = self.get_fees()\n self.stop_loss = stop_loss\n\n self.ema = EMA(self.duration)\n\n self.buy_order = None\n self.sell_order = None\n\n self.buy_position = 0\n self.buy_px = 0\n self.sell_position = 0\n self.sell_px = 0\n\n self.last_email_time = 0\n\n self.run = True\n\n self.ticker = None\n\n def get_fees(self):\n account_info = self.trade_client.account_info()\n return float(account_info[0][\"maker_fees\"])\n\n def get_pnl(self):\n pos = max(self.buy_position, self.sell_position)\n\n if(pos == 0):\n return 0\n\n bid = float(self.ticker['bid'])\n ask = float(self.ticker['ask'])\n\n buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position * self.buy_px) / pos\n sell_avg_px = ((pos - self.sell_position) * bid + self.sell_position * self.sell_px) / pos\n\n cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0\n\n return pos * (sell_avg_px - buy_avg_px) - cost\n\n\n def buy_sell_px(self):\n position_offset = self.net_position() / self.total_amount / 2.0\n buy_threshold = self.threshold * (1 + position_offset)\n sell_threshold = self.threshold * (1 - position_offset)\n buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))\n sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))\n\n return (buy_px, sell_px)\n\n def can_buy(self, buy_px, bid):\n return (buy_px >= bid and self.net_position() < self.total_amount)\n\n def can_sell(self, sell_px, ask):\n return (sell_px <= ask and -self.net_position() < self.total_amount)\n\n def net_position(self):\n return (self.buy_position - self.sell_position)\n\n def update_order_status(self):\n try:\n if(self.buy_order != None):\n status = self.trade_client.status_order(self.buy_order.id)\n\n executed_amount = float(status[\"executed_amount\"])\n executed_px = float(status[\"avg_execution_price\"])\n\n new_executed_amount = executed_amount - self.buy_order.traded_amount\n\n if (new_executed_amount > 0):\n print(status)\n new_executed_px = (executed_amount * 
executed_px - self.buy_order.traded_amount * self.buy_order.traded_px) / new_executed_amount\n\n self.buy_order.traded_amount = executed_amount\n self.buy_order.traded_px = executed_px\n\n self.buy_px = (self.buy_px * self.buy_position + new_executed_amount * new_executed_px) / (self.buy_position + new_executed_amount)\n self.buy_position += new_executed_amount\n\n if(not status[\"is_live\"]):\n self.buy_order = None\n\n if (self.sell_order != None):\n status = self.trade_client.status_order(self.sell_order.id)\n\n executed_amount = float(status[\"executed_amount\"])\n executed_px = float(status[\"avg_execution_price\"])\n\n new_executed_amount = executed_amount - self.sell_order.traded_amount\n\n if(new_executed_amount > 0):\n print(status)\n new_executed_px = (executed_amount * executed_px - self.sell_order.traded_amount * self.sell_order.traded_px) / new_executed_amount\n\n self.sell_order.traded_amount = executed_amount\n self.sell_order.traded_px = executed_px\n\n self.sell_px = (self.sell_px * self.sell_position + new_executed_amount * new_executed_px) / (self.sell_position + new_executed_amount)\n self.sell_position += new_executed_amount\n print(\"PNL => \" + str(self.get_pnl()))\n\n if (not status[\"is_live\"]):\n self.sell_order = None\n\n print(\"Position => BuyPos:\" + str(self.buy_position) + \"|BuyPx:\" + str(self.buy_px) + \"|SellPos:\" + str(self.sell_position) + \"|SellPx:\" + str(self.sell_px))\n\n except Exception as e:\n print(\"Update Order Status Exception: \" + str(e))\n pass\n\n def sqoff(self):\n pnl = self.get_pnl()\n\n if(pnl < -self.stop_loss):\n position = self.net_position()\n if(position > 0):\n self.sell_order = Order(position, float(self.ticker['bid']), \"sell\", \"exchange fill-or-kill\", self.symbol)\n\n print(self.buy_order)\n\n status = self.trade_client.place_order(str(position), str(self.ticker['bid']), \"sell\", \"exchange fill-or-kill\", False,\n self.symbol)\n print(status)\n\n if (\"order_id\" in status):\n self.sell_order.id = status[\"order_id\"]\n else:\n self.sell_order = None\n\n return True\n elif(position < 0):\n self.buy_order = Order(-position, float(self.ticker['ask']), \"buy\", \"exchange fill-or-kill\", self.symbol)\n\n print(self.buy_order)\n\n status = self.trade_client.place_order(str(-position), str(self.ticker['ask']), \"buy\", \"exchange fill-or-kill\", False,\n self.symbol)\n print(status)\n\n if (\"order_id\" in status):\n self.buy_order.id = status[\"order_id\"]\n else:\n self.buy_order = None\n\n return True\n\n return False\n\n def email(self, message, sub, emails):\n\n curr_time = time.time()\n\n if (curr_time - self.last_email_time > 1800):\n print(message)\n\n def trade(self):\n while (self.run):\n time.sleep(self.interval)\n\n\n try:\n self.ticker = self.client.ticker(self.symbol)\n except Exception as e:\n print(\"Ticker Exception: \" + str(e))\n continue\n\n self.update_order_status()\n self.ema.update(self.ticker['mid'])\n\n if(self.ema.ready()):\n if (not self.sqoff()):\n buy_px, sell_px = self.buy_sell_px()\n\n bid = self.ticker['bid']\n ask = self.ticker['ask']\n\n buy_px = min(bid, buy_px)\n sell_px = max(ask, sell_px)\n\n print(\"Market => EMA: \" + str(round(self.ema.value)) + \"|BuyPx:\" + str(buy_px) + \"|SellPx:\" + str(sell_px) + \"|Bid:\" + str(bid) + \"|Ask:\" + str(ask))\n\n if(self.can_buy(buy_px, bid)):\n if(self.buy_order == None):\n amount = min(self.single_order_amount, self.total_amount - max(0, self.net_position()))\n self.buy_order = Order(amount, buy_px, \"buy\", \"exchange limit\", 
self.symbol)\n\n print(self.buy_order)\n\n status = self.trade_client.place_order(str(amount), str(buy_px), \"buy\", \"exchange limit\", True, self.symbol)\n print(status)\n\n if (\"order_id\" in status):\n self.buy_order.id = status[\"order_id\"]\n else:\n self.buy_order = None\n else:\n if(abs(self.buy_order.price - buy_px) / buy_px * 100.0 > self.threshold / 10.0 ):\n self.trade_client.delete_order(self.buy_order.id)\n\n if (self.can_sell(sell_px, ask)):\n if (self.sell_order == None):\n amount = min(self.single_order_amount, self.total_amount - max(0, -self.net_position()))\n self.sell_order = Order(amount, sell_px, \"sell\", \"exchange limit\", self.symbol)\n\n print(self.sell_order)\n\n status = self.trade_client.place_order(str(amount), str(sell_px), \"sell\", \"exchange limit\", True, self.symbol)\n print(status)\n\n if(\"order_id\" in status):\n self.sell_order.id = status[\"order_id\"]\n else:\n self.sell_order = None\n\n else:\n if (abs(self.sell_order.price - sell_px) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount):\n self.trade_client.delete_order(self.sell_order.id)\n\n\n",
"step-ids": [
11,
16,
20,
21,
22
]
}
|
[
11,
16,
20,
21,
22
] |
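The quoting logic in buy_sell_px above skews the bid/ask band around the EMA according to current inventory. A standalone sketch of that arithmetic with made-up numbers (ema_value, threshold, and position figures are illustrative, not taken from the record):

# Inventory-skewed quoting, as in buy_sell_px above (all numbers invented).
ema_value = 30000.0      # current EMA of the mid price
threshold = 0.4          # base half-spread, in percent
total_amount = 2.0       # maximum absolute position
net_position = 0.5       # currently long 0.5 units

position_offset = net_position / total_amount / 2.0     # 0.125
buy_threshold = threshold * (1 + position_offset)       # widen the bid when long
sell_threshold = threshold * (1 - position_offset)      # tighten the ask when long

buy_px = round(ema_value * (1 - buy_threshold / 100.0))    # 29865
sell_px = round(ema_value * (1 + sell_threshold / 100.0))  # 30105
print(buy_px, sell_px)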
import socket
import time
class FileTransProgram(object):
def __init__(self, ADDR, file_name):
self.ADDR = ADDR
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(ADDR)
self.file_name = file_name
def recv(self):
self.sock.send(bytes("Connect", "utf8"))
        file_size = int(self.sock.recv(1024).strip())  # size arrives as bytes; convert before comparing
with open(self.file_name, "wb") as f:
while file_size > 0:
if file_size < 1024:
f.write(self.sock.recv(1024))
break
else:
f.write(self.sock.recv(1024))
file_size -= 1024
self.sock.send(bytes("Success", "utf-8"))
self.close()
def stor(self):
try:
with open(self.file_name, "rb") as f:
data = f.read()
self.sock.send(bytes(str(len(data)), "utf-8"))
time.sleep(0.2)
self.sock.send(data)
except FileNotFoundError as e:
raise e
ACK = str(self.sock.recv(1024).strip(), "utf-8")
if ACK == "Success":
self.close()
def close(self):
self.sock.close()
|
normal
|
{
"blob_id": "231a07e63e40f2e4d204cde76c52e64b922da1b8",
"index": 2619,
"step-1": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-4": "import socket\nimport time\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-5": "import socket\nimport time\n\n\nclass FileTransProgram(object):\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes(\"Connect\", \"utf8\"))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, \"wb\") as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes(\"Success\", \"utf-8\"))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, \"rb\") as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), \"utf-8\"))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), \"utf-8\")\n if ACK == \"Success\":\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
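The stor() path above assumes a peer that first reads a size line, then the raw payload, and finally replies "Success". A minimal sketch of that receiving side under those assumptions (the address and output filename are placeholders):

import socket

# Hypothetical server counterpart for FileTransProgram.stor().
HOST, PORT = "0.0.0.0", 9000                      # placeholder address
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.bind((HOST, PORT))
    srv.listen(1)
    conn, _ = srv.accept()
    with conn:
        file_size = int(conn.recv(1024).strip())  # the size line sent first
        received = b""
        while len(received) < file_size:
            chunk = conn.recv(1024)
            if not chunk:                         # peer closed early
                break
            received += chunk
        with open("received.bin", "wb") as f:     # placeholder filename
            f.write(received)
        conn.send(b"Success")                     # the ACK stor() waits for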
# Generated by Django 3.0.7 on 2020-07-05 07:34
from django.db import migrations, models
import location_field.models.plain
class Migration(migrations.Migration):
dependencies = [
('driver', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='driver',
name='address',
),
migrations.AddField(
model_name='driver',
name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='image',
field=models.ImageField(default='', upload_to='mechanic_img'),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='location',
field=location_field.models.plain.PlainLocationField(default='', max_length=63),
preserve_default=False,
),
migrations.AlterField(
model_name='driver',
name='first_name',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='driver',
name='last_name',
field=models.CharField(max_length=150),
),
]
|
normal
|
{
"blob_id": "ea918bdf96572b38461dc1810bd0b8c16efd0f0d",
"index": 5786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-4": "from django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-5": "# Generated by Django 3.0.7 on 2020-07-05 07:34\n\nfrom django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('driver', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='driver',\n name='address',\n ),\n migrations.AddField(\n model_name='driver',\n name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='image',\n field=models.ImageField(default='', upload_to='mechanic_img'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='location',\n field=location_field.models.plain.PlainLocationField(default='', max_length=63),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='driver',\n name='first_name',\n field=models.CharField(max_length=150),\n ),\n migrations.AlterField(\n model_name='driver',\n name='last_name',\n field=models.CharField(max_length=150),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.0.5 on 2020-04-25 12:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_order_date'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='product',
),
migrations.AddField(
model_name='order',
name='product',
field=models.ManyToManyField(to='api.Product'),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.TextField(default='неплачено', max_length=50),
),
]
|
normal
|
{
"blob_id": "4cc138016cb1f82e12c76c185be19188d3e38bf9",
"index": 2186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0006_order_date')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.AddField(model_name='order', name='product', field=\n models.ManyToManyField(to='api.Product')), migrations.AlterField(\n model_name='order', name='status', field=models.TextField(default=\n 'неплачено', max_length=50))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0006_order_date')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.AddField(model_name='order', name='product', field=\n models.ManyToManyField(to='api.Product')), migrations.AlterField(\n model_name='order', name='status', field=models.TextField(default=\n 'неплачено', max_length=50))]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-04-25 12:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0006_order_date'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='product',\n ),\n migrations.AddField(\n model_name='order',\n name='product',\n field=models.ManyToManyField(to='api.Product'),\n ),\n migrations.AlterField(\n model_name='order',\n name='status',\n field=models.TextField(default='неплачено', max_length=50),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
from scipy.stats import shapiro
import scipy.stats as stats
df_test = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Test Group")
df_control = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Control Group")
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
# Setting threshold value for outliers
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
# Checks for any outliers in the variable.
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].shape[0]
print(col, " : ", number_of_outliers, "outliers")
# has_outliers prints any columns with outliers itself; wrapping the call in
# print() would just append "None" to the output.
for var in df_control:
    has_outliers(df_control, [var])

for var in df_test:
    has_outliers(df_test, [var])
# How would you describe the hypothesis of the A / B test?
# H0 : There is no statistical difference between the control and test groups in terms of average number of purchases.
# H1 : There is a statistical difference between the control and test groups in terms of the average number of purchases.
df_control["Purchase"].mean()
df_test["Purchase"].mean()
group_a = df_control["Purchase"]
group_b = df_test["Purchase"]
# 1- Assumption Check
# 1.1 - Normality Assumption
test_statistics, pvalue = shapiro(group_a)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value < 0.05, H0 is rejected.
# If p-value is not < 0.05, H0 CANNOT be rejected.
# group_a is normally distributed.
test_statistics, pvalue = shapiro(group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value < 0.05, H0 is rejected.
# If p-value is not < 0.05, H0 CANNOT be rejected.
# group_b is normally distributed.
# 1.2 - Variance Homogeneity Assumption
# H0: Variances Are Homogeneous
# H1: Variances Are Not Homogeneous
test_statistics, pvalue = stats.levene(group_a, group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value < 0.05, H0 is rejected.
# If p-value is not < 0.05, H0 CANNOT be rejected.
# Variance homogeneity holds.
# H0: there is no statistical difference between the control and test groups in terms of average number of purchases.
# H1: there is a statistical difference between the control and test groups in terms of average number of purchases.
# 1.1 Independent two-sample t-test if assumptions are provided (parametric test)
test_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
# Can we draw statistically significant conclusions?
# There is no statistically significant difference between the control group and test groups.
# The two groups are alike.
# Which test did you use? Why is that?
# We used the two-sample t-test (parametric test) since both assumptions are satisfied
# What is your advice to the customer?
# There is no statistical difference between average bidding and maximum bidding
# It can be preferred with a low cost per click.
# We can evaluate the differences in interaction gain and conversion rates and determine which method is more profitable.
# The test can be extended for 1 month.
# The number of observations can be increased.
|
normal
|
{
"blob_id": "9e01ba8c489791ec35b86dffe12d0cedb5f09004",
"index": 3919,
"step-1": "<mask token>\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\n<mask token>\n",
"step-2": "<mask token>\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-3": "<mask token>\ndf_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Test Group')\ndf_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Control Group')\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\ngroup_a = df_control['Purchase']\ngroup_b = df_test['Purchase']\ntest_statistics, pvalue = shapiro(group_a)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = shapiro(group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.levene(group_a, group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-4": "import pandas as pd\nfrom scipy.stats import shapiro\nimport scipy.stats as stats\ndf_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Test Group')\ndf_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Control Group')\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\ngroup_a = df_control['Purchase']\ngroup_b = df_test['Purchase']\ntest_statistics, pvalue = shapiro(group_a)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = shapiro(group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.levene(group_a, group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-5": "import pandas as pd\r\nfrom scipy.stats import shapiro\r\nimport scipy.stats as stats\r\n\r\ndf_test = pd.read_excel(\"datasets/ab_testing_data.xlsx\", sheet_name=\"Test Group\")\r\ndf_control = pd.read_excel(\"datasets/ab_testing_data.xlsx\", sheet_name=\"Control Group\")\r\n\r\ndf_test.head()\r\ndf_control.head()\r\n\r\ndf_control.info()\r\ndf_test.info()\r\n\r\ndf_test.shape\r\ndf_control.shape\r\n\r\n\r\n# Setting threshold value for outliers\r\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95):\r\n quantile_one = dataframe[variable].quantile(low_quantile)\r\n quantile_three = dataframe[variable].quantile(up_quantile)\r\n interquantile_range = quantile_three - quantile_one\r\n up_limit = quantile_three + 1.5 * interquantile_range\r\n low_limit = quantile_one - 1.5 * interquantile_range\r\n return low_limit, up_limit\r\n\r\n\r\n# Checks for any outliers in the variable.\r\ndef has_outliers(dataframe, numeric_columns):\r\n for col in numeric_columns:\r\n low_limit, up_limit = outlier_thresholds(dataframe, col)\r\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].any(axis=None):\r\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].shape[0]\r\n print(col, \" : \", number_of_outliers, \"outliers\")\r\n\r\n\r\nfor var in df_control:\r\n print(var, \"has \", has_outliers(df_control, [var]), \"Outliers\")\r\n\r\nfor var in df_test:\r\n print(var, \"has \", has_outliers(df_test, [var]), \"Outliers\")\r\n\r\n# How would you describe the hypothesis of the A / B test?\r\n\r\n# H0 : There is no statistical difference between the control and test groups in terms of average number of purchases.\r\n# H1 : There is a statistical difference between the control and test groups in terms of the average number of purchases.\r\n\r\n\r\ndf_control[\"Purchase\"].mean()\r\ndf_test[\"Purchase\"].mean()\r\n\r\ngroup_a = df_control[\"Purchase\"]\r\ngroup_b = df_test[\"Purchase\"]\r\n\r\n# 1- Assumption Check\r\n\r\n# 1.1 - Normality Assumption\r\n\r\ntest_statistics, pvalue = shapiro(group_a)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# group_a is distributed normally.\r\n\r\ntest_statistics, pvalue = shapiro(group_b)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# group_b is distributed normally.\r\n\r\n# 1.2 - Variance Homogeneity Assumption\r\n\r\n# H0: Variances Are Homogeneous\r\n# H1: Variances Are Not Homogeneous\r\n\r\ntest_statistics, pvalue = stats.levene(group_a, group_b)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# Variance homogeneity provided.\r\n\r\n# HO: there is no statistical difference between the control and test groups in terms of average number of purchases.\r\n# H1: there is a statistical difference between the control and test groups in terms of average number of purchases\r\n\r\n# 1.1 Independent two-sample t-test if assumptions are provided (parametric test)\r\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\n# Can we make 
statistically significant results?\r\n\r\n# There is no statistically significant difference between the control group and test groups.\r\n# The two groups are alike.\r\n\r\n# Which test did you use? Why is that?\r\n\r\n# We used the two-sample t-test (parametric test) since both assumptions are satisfied\r\n\r\n# What is your advice to the customer?\r\n# There is no statistical difference between average bidding and maximum bidding\r\n# It can be preferred with a low cost per click.\r\n# We can evaluate the differences in interaction gain and conversion rates and determine which method is more profitable.\r\n# The test can be extended for 1 month.\r\n# The number of observations can be increased.\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
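The script settles on the independent two-sample t-test because both assumptions hold. Had either check failed, the usual non-parametric fallback is the Mann-Whitney U test; a sketch of that branch (group_a and group_b stand for the two Purchase series, shown here with placeholder values):

import scipy.stats as stats

# Non-parametric fallback when normality or variance homogeneity is rejected.
group_a = [550, 580, 610, 495, 620]   # placeholder purchase figures
group_b = [560, 600, 640, 510, 650]

test_statistics, pvalue = stats.mannwhitneyu(group_a, group_b,
                                             alternative='two-sided')
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
# As before, p-value < 0.05 would reject H0.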
from tasks import video_compress, video_upload
if __name__ == '__main__':
    video_compress.apply_async(["a"], queue='high')
    video_compress.apply_async(["b"], queue='low')
video_upload.apply_async(["c"], queue='low')
video_upload.apply_async(["d"], queue='high')
|
normal
|
{
"blob_id": "2cd7d4fe87de66e85bc0d060e2eaa68be39eed02",
"index": 9461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n video_compress.apply_async(['a'], queue='high')\n video_compress.apply_async(['b'], queue='low')\n video_upload.apply_async(['c'], queue='low')\n video_upload.apply_async(['d'], queue='high')\n",
"step-3": "from tasks import video_compress, video_upload\nif __name__ == '__main__':\n video_compress.apply_async(['a'], queue='high')\n video_compress.apply_async(['b'], queue='low')\n video_upload.apply_async(['c'], queue='low')\n video_upload.apply_async(['d'], queue='high')\n",
"step-4": "from tasks import video_compress, video_upload\nif __name__ == '__main__':\n video_compress.apply_async([\"a\"],queue='high')\n video_compress.apply_async([\"b\"],queue='low')\n video_upload.apply_async([\"c\"], queue='low')\n video_upload.apply_async([\"d\"], queue='high')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
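The tasks module imported above is not part of this record; a plausible minimal sketch of it, assuming a Redis broker (the broker URL and task bodies are made up):

# Hypothetical tasks.py matching the apply_async calls above.
from celery import Celery

app = Celery('tasks', broker='redis://localhost:6379/0')  # placeholder broker

@app.task
def video_compress(name):
    # the 'high'/'low' queue is chosen by the caller via apply_async(queue=...)
    print('compressing', name)

@app.task
def video_upload(name):
    print('uploading', name)

# One worker per queue would consume these, e.g.:
#   celery -A tasks worker -Q high
#   celery -A tasks worker -Q low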
from flask_opencv_streamer.streamer import Streamer
import cv2
import numpy as np
# 3x3 Laplacian kernel for edge detection
MASK = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
])
port = 3030
require_login = False
streamer = Streamer(port, require_login)
video_capture = cv2.VideoCapture('http://149.43.156.105/mjpg/video.mjpg')
while True:
_, frame = video_capture.read()
frame = cv2.medianBlur(frame, 3)
frame = cv2.filter2D(frame, -1, MASK)
_, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)
streamer.update_frame(frame)
if not streamer.is_streaming:
streamer.start_streaming()
    # this was in the original example, but it seems to work without it
# cv2.waitKey(30)
|
normal
|
{
"blob_id": "a19b4928c9423dae6c60f39dbc5af0673b433c8e",
"index": 3551,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n _, frame = video_capture.read()\n frame = cv2.medianBlur(frame, 3)\n frame = cv2.filter2D(frame, -1, MASK)\n _, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)\n streamer.update_frame(frame)\n if not streamer.is_streaming:\n streamer.start_streaming()\n",
"step-3": "<mask token>\nMASK = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])\nport = 3030\nrequire_login = False\nstreamer = Streamer(port, require_login)\nvideo_capture = cv2.VideoCapture('http://149.43.156.105/mjpg/video.mjpg')\nwhile True:\n _, frame = video_capture.read()\n frame = cv2.medianBlur(frame, 3)\n frame = cv2.filter2D(frame, -1, MASK)\n _, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)\n streamer.update_frame(frame)\n if not streamer.is_streaming:\n streamer.start_streaming()\n",
"step-4": "from flask_opencv_streamer.streamer import Streamer\nimport cv2\nimport numpy as np\nMASK = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])\nport = 3030\nrequire_login = False\nstreamer = Streamer(port, require_login)\nvideo_capture = cv2.VideoCapture('http://149.43.156.105/mjpg/video.mjpg')\nwhile True:\n _, frame = video_capture.read()\n frame = cv2.medianBlur(frame, 3)\n frame = cv2.filter2D(frame, -1, MASK)\n _, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)\n streamer.update_frame(frame)\n if not streamer.is_streaming:\n streamer.start_streaming()\n",
"step-5": "from flask_opencv_streamer.streamer import Streamer\r\nimport cv2\r\nimport numpy as np\r\n\r\nMASK = np.array([\r\n [0, 1, 0],\r\n [1, -4, 1],\r\n [0, 1, 0]\r\n])\r\n\r\nport = 3030\r\nrequire_login = False\r\nstreamer = Streamer(port, require_login)\r\n\r\nvideo_capture = cv2.VideoCapture('http://149.43.156.105/mjpg/video.mjpg')\r\n\r\nwhile True:\r\n _, frame = video_capture.read()\r\n\r\n frame = cv2.medianBlur(frame, 3)\r\n frame = cv2.filter2D(frame, -1, MASK)\r\n _, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)\r\n streamer.update_frame(frame)\r\n\r\n if not streamer.is_streaming:\r\n streamer.start_streaming()\r\n # было в примере, но вроде и без этого работает\r\n # cv2.waitKey(30)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# count how many of the 12 input lines contain the letter 'r'
c=0
for i in range(12):
if 'r' in input():
c+=1
# result
print(c)
|
normal
|
{
"blob_id": "294b0dc7587ecd37887591da5a1afe96a4349f6b",
"index": 8711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-3": "c = 0\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-4": "# ?????\r\nc=0\r\n\r\nfor i in range(12):\r\n if 'r' in input():\r\n c+=1\r\n\r\n# ??\r\nprint(c)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import sys
import os
import math
import random
if hasattr(sys, '__interactivehook__'):
del sys.__interactivehook__
print('Python3 startup file loaded from ~/.config/pystartup.py')
|
normal
|
{
"blob_id": "5ddde3aa6eaa30b70743272a532874663067eed6",
"index": 3157,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n",
"step-3": "import sys\nimport os\nimport math\nimport random\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n",
"step-4": "#!/usr/bin/env python3\n\nimport sys\nimport os\nimport math\nimport random\n\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\n\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
#
# compare-sorts.py
# Copyright (c) 2017 Dylan Brown. All rights reserved.
#
# Use Python 3. Run from the project's root directory (see the assert below).
import os
import sys
import re
import subprocess
# Ensure we don't silently fail by running Python 2.
assert sys.version_info[0] >= 3, "This script requires Python 3.x"
assert os.getcwd().split("/")[-1] == "algorithms-sedgewick-wayne", \
"This script must be run from the project's root directory."
# Number of iterations to average over.
N = 25
# Data file to sort.
# DATA = "./algs4-data/words3.txt"
DATA = "./algs4-data/medTale.txt"
def main():
sorts = ["selection-sort",
"insertion-sort",
"shell-sort"]
for sort in sorts:
exe_path = "./build/{}".format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError("The executable {} does not exist.".format(exe_path))
accumulated_time = 0
for i in range(N):
# Note shell=True has security implications. Don't accept external inputs.
b_output = subprocess.check_output(" ".join([exe_path, DATA]), shell=True)
str_output = str(b_output)
            # Use regex to extract the number following "(ns) =" in the output.
            accumulated_time += int(re.findall(r"\d+", str_output)[0])  # Elapsed time in nanoseconds.
average_time = accumulated_time / N
if "selection-sort" == sort:
print("{:>14} took {:>8} ns on average.".format(sort, int(average_time)))
sel_sort_time = average_time
else:
print("{:>14} took {:>8} ns on average, "
"a {:4.1f}x speedup over selection sort.".format(sort,
int(average_time),
sel_sort_time / average_time))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "501d50fa933f55c178b4b2eba6cfc5b85592beaa",
"index": 8473,
"step-1": "<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\n<mask token>\n",
"step-2": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\n<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport re\nimport subprocess\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n#\n# compare-sorts.py\n# Copyright (c) 2017 Dylan Brown. All rights reserved.\n#\n\n# Use Python 3. Run from within the scripts/ directory.\n\nimport os\nimport sys\nimport re\nimport subprocess\n\n# Ensure we don't silently fail by running Python 2.\nassert sys.version_info[0] >= 3, \"This script requires Python 3.x\"\nassert os.getcwd().split(\"/\")[-1] == \"algorithms-sedgewick-wayne\", \\\n \"This script must be run from the project's root directory.\"\n\n# Number of iterations to average over.\nN = 25\n\n# Data file to sort.\n# DATA = \"./algs4-data/words3.txt\"\nDATA = \"./algs4-data/medTale.txt\"\n\ndef main():\n sorts = [\"selection-sort\",\n \"insertion-sort\",\n \"shell-sort\"]\n\n for sort in sorts:\n exe_path = \"./build/{}\".format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError(\"The executable {} does not exist.\".format(exe_path))\n\n accumulated_time = 0\n for i in range(N):\n # Note shell=True has security implications. Don't accept external inputs.\n b_output = subprocess.check_output(\" \".join([exe_path, DATA]), shell=True)\n str_output = str(b_output)\n # Use regex to extract the number follwing \"(ns) =\" in the output.\n accumulated_time += int(re.findall(\"\\d+\", str_output)[0]) # Elapsed time in nanoseconds.\n average_time = accumulated_time / N\n\n if \"selection-sort\" == sort:\n print(\"{:>14} took {:>8} ns on average.\".format(sort, int(average_time)))\n sel_sort_time = average_time\n else:\n print(\"{:>14} took {:>8} ns on average, \"\n \"a {:4.1f}x speedup over selection sort.\".format(sort,\n int(average_time),\n sel_sort_time / average_time))\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
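The loop above notes that shell=True has security implications. A sketch of the same measurement call with shell=False, where the arguments are passed as a list and nothing in exe_path or DATA is interpreted by a shell:

import subprocess

# shell=False variant of the benchmarking call (example values from the script).
exe_path = "./build/shell-sort"
DATA = "./algs4-data/medTale.txt"
b_output = subprocess.check_output([exe_path, DATA])  # no shell involved
str_output = b_output.decode()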
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError
from passlib.hash import pbkdf2_sha256
from models import User
def invalid_credentials(form, field):
''' Username and password checker '''
username_entered = form.username.data
password_entered = field.data
user_object = User.query.filter_by(username=username_entered).first()
if user_object is None:
raise ValidationError("Username or password is incorrect")
elif not pbkdf2_sha256.verify(password_entered, user_object.password):
raise ValidationError("Username or password is incorrect")
class JoinRoomForm(FlaskForm):
room_code = StringField('room_code', validators=[InputRequired(message="Room Code Required")])
username = StringField('username', validators=[InputRequired(message="Username Required")])
# room_code = StringField('room_code')
# username = StringField('username')
submit_button = SubmitField('Join Room')
class CreateRoomForm(FlaskForm):
username = StringField('username', validators=[InputRequired(message="Username Required")])
numplayers = StringField('numplayers', validators=[InputRequired(message="Username Required")])
# room_code = StringField('room_code')
# username = StringField('username')
submit_button = SubmitField('Join Room')
class CreateUsernameForm(FlaskForm):
username = StringField('username', validators=[InputRequired(message="Username Required")])
# room_code = StringField('room_code')
# username = StringField('username')
submit_button = SubmitField('Join Room')
class RegistrationForm(FlaskForm):
''' Registration form '''
username = StringField('username_label', validators=[InputRequired(message="Username required"),
Length(min=4, max=25, message="Username must be between 4 and 25 characters")])
password = PasswordField('password_label', validators=[InputRequired(message="Password required"),
Length(min=4, message="Password must be at least 4 characters")])
confirm_pswd = PasswordField('confirm_pswd_label', validators=[InputRequired(message="Please retype your password"),
EqualTo('password', message="Passwords must match")])
submit_button = SubmitField('Click Here to Start')
def validate_username(self, username):
user_object = User.query.filter_by(username=username.data).first()
if user_object:
raise ValidationError("Username already exists. Select a different username")
class LoginForm(FlaskForm):
''' Login form '''
username = StringField('username_label', validators=[InputRequired(message="Username required")])
password = PasswordField('password_label', validators=[InputRequired(message="Password required"), invalid_credentials])
submit_button = SubmitField('Login')
|
normal
|
{
"blob_id": "623bd858923d5f9cc109af586fdda01cd3d5fff3",
"index": 961,
"step-1": "<mask token>\n\n\nclass CreateRoomForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"step-2": "<mask token>\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"step-3": "<mask token>\n\n\nclass JoinRoomForm(FlaskForm):\n room_code = StringField('room_code', validators=[InputRequired(message=\n 'Room Code Required')])\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"step-4": "<mask token>\n\n\ndef invalid_credentials(form, field):\n \"\"\" Username and password checker \"\"\"\n username_entered = form.username.data\n password_entered = field.data\n user_object = User.query.filter_by(username=username_entered).first()\n if user_object is None:\n raise ValidationError('Username or password is incorrect')\n elif not pbkdf2_sha256.verify(password_entered, user_object.password):\n raise ValidationError('Username or password is incorrect')\n\n\nclass JoinRoomForm(FlaskForm):\n room_code = StringField('room_code', validators=[InputRequired(message=\n 'Room Code Required')])\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"step-5": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField\nfrom wtforms.validators import InputRequired, Length, EqualTo, ValidationError\nfrom passlib.hash import pbkdf2_sha256\nfrom models import User\n\ndef invalid_credentials(form, field):\n\t''' Username and password checker '''\n\n\tusername_entered = form.username.data\n\tpassword_entered = field.data\n\n\tuser_object = User.query.filter_by(username=username_entered).first()\n\tif user_object is None:\n\t\traise ValidationError(\"Username or password is incorrect\")\n\telif not pbkdf2_sha256.verify(password_entered, user_object.password):\n\t\traise ValidationError(\"Username or password is incorrect\")\n\nclass JoinRoomForm(FlaskForm):\n\troom_code = StringField('room_code', validators=[InputRequired(message=\"Room Code Required\")])\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\nclass CreateRoomForm(FlaskForm):\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\tnumplayers = StringField('numplayers', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\n\nclass CreateUsernameForm(FlaskForm):\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\n\nclass RegistrationForm(FlaskForm):\n\t''' Registration form '''\n\n\tusername = StringField('username_label', validators=[InputRequired(message=\"Username required\"),\n\t\tLength(min=4, max=25, message=\"Username must be between 4 and 25 characters\")])\n\tpassword = PasswordField('password_label', validators=[InputRequired(message=\"Password required\"),\n\t\tLength(min=4, message=\"Password must be at least 4 characters\")])\n\tconfirm_pswd = PasswordField('confirm_pswd_label', validators=[InputRequired(message=\"Please retype your password\"),\n\t\tEqualTo('password', message=\"Passwords must match\")])\n\tsubmit_button = SubmitField('Click Here to Start')\n\n\tdef validate_username(self, username):\n\t\tuser_object = User.query.filter_by(username=username.data).first()\n\t\tif user_object:\n\t\t\traise ValidationError(\"Username already exists. Select a different username\")\n\n\nclass LoginForm(FlaskForm):\n\t''' Login form '''\n\n\tusername = StringField('username_label', validators=[InputRequired(message=\"Username required\")])\n\tpassword = PasswordField('password_label', validators=[InputRequired(message=\"Password required\"), invalid_credentials])\n\tsubmit_button = SubmitField('Login')\n",
"step-ids": [
10,
11,
13,
14,
16
]
}
|
[
10,
11,
13,
14,
16
] |
import boto3
import os
from trustedadvisor import authenticate_support
accountnumber = os.environ['Account_Number']
rolename = os.environ['Role_Name']
rolesession = accountnumber + rolename
def lambda_handler(event, context):
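    # Assume the cross-account role via STS and hand the temporary
    # credentials to the Trusted Advisor helper imported above.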
sts_client = boto3.client('sts')
assumerole = sts_client.assume_role(
RoleArn="arn:aws:iam::" + accountnumber + ":role/" + rolename,
RoleSessionName=rolesession
)
credentials = assumerole['Credentials']
return authenticate_support(credentials)
|
normal
|
{
"blob_id": "539431649e54469ddbe44fdbd17031b4449abdd9",
"index": 5867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-3": "<mask token>\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-4": "import boto3\nimport os\nfrom trustedadvisor import authenticate_support\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(RoleArn='arn:aws:iam::' +\n accountnumber + ':role/' + rolename, RoleSessionName=rolesession)\n credentials = assumerole['Credentials']\n return authenticate_support(credentials)\n",
"step-5": "import boto3\nimport os\n\nfrom trustedadvisor import authenticate_support\n\naccountnumber = os.environ['Account_Number']\nrolename = os.environ['Role_Name']\nrolesession = accountnumber + rolename\n\n\ndef lambda_handler(event, context):\n sts_client = boto3.client('sts')\n assumerole = sts_client.assume_role(\n RoleArn=\"arn:aws:iam::\" + accountnumber + \":role/\" + rolename,\n RoleSessionName=rolesession\n )\n\n credentials = assumerole['Credentials']\n\n return authenticate_support(credentials)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from . import models
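# RegisterForm builds on Django's built-in UserCreationForm, adding a required email field.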
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':"oldPassword"}))
password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label="Password")
password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')
|
normal
|
{
"blob_id": "503726cd2d70286189f4b8e02acaa3d5f6e29e12",
"index": 8538,
"step-1": "<mask token>\n\n\nclass ChangeEmail(forms.Form):\n <mask token>\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-2": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-3": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-4": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-5": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':\"oldPassword\"}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\"Password\")\n password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#!/usr/bin/python2
import md5
from pwn import *
import time
LIMIT = 500
TARGET = "shell2017.picoctf.com"
PORT = 46290
FILE = "hash.txt"
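# Attack idea: the service reveals the seed of an MD5 hash chain, so we can
# regenerate the chain locally and answer with the element that precedes the
# current hash, which the server accepts as valid.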
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
def find_prev_hash(array,current_hash):
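    # Step one element back in the chain; note that an index of 0 would wrap
    # to the last element via Python's -1 indexing.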
return array[array.index(current_hash)-1]
def skip_intro(conn):
for i in range(6):
conn.recvline()
def skip_question(conn):
for i in range(4):
conn.recvline()
def go_to_register(conn):
    #skip the banner and menu lines that recvline() would otherwise return
skip_intro(conn)
skip_question(conn)
conn.sendline("r")
def extract_hash_id():
conn = remote(TARGET,PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed),current_hash)
conn.sendline(prev_hash)
    #"Yep!" appears in the server's success response, so use it as the success check
if "Yep!" in conn.recvline():
conn.close()
return (hash_id, seed)
conn.close()
return None
def main():
    hash_list = []
    for i in range(1000):
        try:
            result = extract_hash_id()
            # extract_hash_id() returns None when validation fails; skip those
            # so the write loop below never hits a None tuple.
            if result is not None:
                hash_list.append(result)
        except Exception:
            time.sleep(1)
            continue
    with open(FILE, "a") as fp:
        for tup in hash_list:
            fp.write("{} {}\n".format(tup[0], tup[1]))
#I went back into the code to use this function whenever I found a match in my hash text file
# print(find_prev_hash(generate_hashes("ead81fe8cfe9fda9e4c2093e17e4d024"),"58cb392a127b699c6f22f228e23ae73e"))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "5e78992df94cbbe441495b7d8fb80104ec000748",
"index": 6728,
"step-1": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\n<mask token>\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import md5\nfrom pwn import *\nimport time\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python2\nimport md5 \nfrom pwn import *\nimport time\n\nLIMIT = 500\nTARGET = \"shell2017.picoctf.com\"\nPORT = 46290\nFILE = \"hash.txt\"\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n \n for i in range(1000): \n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n \n return a\n\ndef find_prev_hash(array,current_hash):\n return array[array.index(current_hash)-1]\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\ndef go_to_register(conn):\n #there's a bunch of crap that needs to be skipped for recvline()\n skip_intro(conn)\n skip_question(conn)\n conn.sendline(\"r\")\n \ndef extract_hash_id():\n conn = remote(TARGET,PORT) \n go_to_register(conn)\n \n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n \n prev_hash = find_prev_hash(generate_hashes(seed),current_hash)\n conn.sendline(prev_hash)\n \n #Yep is in the validated hash, so we will use this as the success metric\n if \"Yep!\" in conn.recvline():\n conn.close()\n return (hash_id, seed)\n conn.close()\n return None\n \ndef main(): \n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open(\"hash.txt\",\"a\") as fp:\n for tup in hash_list:\n fp.write(\"{} {}\\n\".format(tup[0],tup[1])) \n\n #I went back into the code to use this function whenever I found a match in my hash text file\n # print(find_prev_hash(generate_hashes(\"ead81fe8cfe9fda9e4c2093e17e4d024\"),\"58cb392a127b699c6f22f228e23ae73e\"))\n \nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
7,
9,
10,
11
]
}
|
[
5,
7,
9,
10,
11
] |
from turtle import *
from shapes import *
#1-
#1.triangle
def eTriangle():
forward(100)
right(120)
forward(100)
right(120)
forward(100)
right(120)
mainloop()
#2.square
def square():
forward(100)
right(90)
forward(100)
right(90)
forward(100)
right(90)
forward(100)
mainloop()
#3.pentagon
def pentagon():
forward(100)
right(72)
forward(100)
right(72)
forward(100)
right(72)
forward(100)
right(72)
forward(100)
mainloop()
#4.hexagon
def hexagon():
forward(100)
right(60)
forward(100)
right(60)
forward(100)
right(60)
forward(100)
right(60)
forward(100)
right(60)
forward(100)
mainloop()
#5.octagon
def octagon():
forward(100)
right(45)
forward(100)
right(45)
forward(100)
right(45)
forward(100)
right(45)
forward(100)
right(45)
forward(100)
right(45)
forward(100)
right(45)
forward(100)
mainloop()
#6.star
def star():
forward(100)
right(144)
forward(100)
right(144)
forward(100)
right(144)
forward(100)
right(144)
forward(100)
mainloop()
#7.circle
def circle():
for i in range(370):
forward(2)
right(1)
mainloop()
#2- draw each shape in turn; note each function above ends in mainloop(), so the script pauses at the first shape until its window is closed
from shapes import *
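# Note: shapes also provides square/circle/star helpers (called later with
# arguments), so this re-import shadows the zero-argument versions above.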
eTriangle()
square()
pentagon()
hexagon()
octagon()
star()
circle()
mainloop()
#3- night sky: yellow stars scattered on a midnight-blue background, plus a grey moon
bgcolor("MidnightBlue")
starp(20, True, "yellow", "MidnightBlue")
right (20)
forward(100)
starp(20, True, "yellow", "MidnightBlue")
right (30)
forward(150)
starp(20, True, "yellow", "MidnightBlue")
right (40)
forward(200)
starp(20, True, "yellow", "MidnightBlue")
right (50)
forward(250)
starp(20, True, "yellow", "MidnightBlue")
right (60)
forward(300)
starp(20, True, "yellow", "MidnightBlue")
forward(100)
starp(20, True, "yellow", "MidnightBlue")
forward(100)
starp(20, True, "yellow", "MidnightBlue")
left (90)
forward(300)
starp(20, True, "yellow", "MidnightBlue")
right (50)
forward (300)
starp(20, True, "yellow", "MidnightBlue")
right(50)
forward(300)
starp(20, True, "yellow", "MidnightBlue")
right (50)
forward (275)
circlep(3, True, "SlateGrey", "MidnightBlue")
right(60)
forward(20)
mainloop()
#4- house scene drawn inline: grass, red house, door, sidewalk, roof, windows, and sun
bgcolor("skyblue")
right(90)
penup()
forward(100)
right(90)
forward(200)
fillcolor("Green")
begin_fill()
forward (300)
left(90)
forward (300)
left(90)
forward(1250)
left(90)
forward(300)
left(90)
forward(1000)
end_fill()
right (90)
pendown()
rectangle(200, 450, True, "Red")
left(180)
forward(200)
left(90)
penup()
forward(100)
right(90)
pendown()
rectangle(50, 100, True, "Brown")
penup()
right(90)
forward(50)
right(90)
forward(50)
right (90)
forward(10)
circle(.1, True, "Black")
penup()
forward(40)
left(90)
forward(50)
pendown()
fillcolor("grey")
begin_fill()
left (20)
forward(400)
left (75)
forward(50)
left(105)
forward(400)
left(75)
forward(50)
end_fill()
right(5)
penup()
forward(200)
right(90)
forward(200)
right(90)
left(40)
pendown()
fillcolor("brown")
begin_fill()
forward(293.717)
right(80)
forward(293.717)
right(140)
forward(450)
end_fill()
penup()
left(90)
forward(75)
left(90)
forward(75)
pendown()
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
penup()
left(90)
forward(25)
right(90)
forward(200)
pendown()
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
penup()
left(90)
forward(250)
left (90)
forward(400)
circlep(3, True, "yellow", "yellow")
mainloop()
#5- the same house scene, refactored into named functions
def door():
rectangle(50, 100, True, "Brown")
penup()
right(90)
forward(50)
right(90)
forward(50)
right (90)
forward(10)
circle(.1, True, "Black")
def grass():
fillcolor("Green")
begin_fill()
forward (300)
left(90)
forward (300)
left(90)
forward(1250)
left(90)
forward(300)
left(90)
forward(1000)
end_fill()
def house():
rectangle(200, 450, True, "Red")
def roof():
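    # Isosceles roof over the 450-wide house: each slanted side is
    # 450*sin(40)/sin(100) ~= 293.717.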
fillcolor("brown")
begin_fill()
forward(293.717)
right(80)
forward(293.717)
right(140)
forward(450)
end_fill()
def window():
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
def sun():
circlep(3, True, "yellow", "yellow")
def sidewalk():
fillcolor("grey")
begin_fill()
left (20)
forward(400)
left (75)
forward(50)
left(105)
forward(400)
left(75)
forward(50)
end_fill()
bgcolor("skyblue")
right(90)
penup()
forward(100)
right(90)
forward(200)
grass()
right (90)
pendown()
house()
left(180)
forward(200)
left(90)
penup()
forward(100)
right(90)
pendown()
door()
penup()
forward(40)
left(90)
forward(50)
pendown()
sidewalk()
right(5)
penup()
forward(200)
right(90)
forward(200)
right(90)
left(40)
pendown()
roof()
penup()
left(90)
forward(75)
left(90)
forward(75)
pendown()
window()
penup()
left(90)
forward(25)
right(90)
forward(200)
pendown()
window()
penup()
left(90)
forward(250)
left (90)
forward(400)
sun()
mainloop()
#6- abstract piece: randomly placed filled stars, shrinking triangles, and squares
import random
def craystar():
color('red', 'yellow')
begin_fill()
for i in range(36):
forward(200)
left(170)
end_fill()
def craytriangle():
color('black', 'blue')
begin_fill()
i = 60
while i > 0:
forward(i)
right(120)
i -= 5
end_fill()
def craysquare():
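    # Twelve times over: trace a 60px square, then wander through 12
    # random-length right-angle steps, filling the whole path.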
color("green", "Blue")
begin_fill()
for i in range(12):
for i in range(4):
forward(60)
right(90)
for i in range(12):
forward (random.randint(1,60))
right(90)
end_fill()
craysquare()
forward (50)
craysquare()
forward (50)
craysquare()
forward (50)
craystar()
forward(random.randint(1,100))
right(random.randint(1, 90))
craytriangle()
forward(random.randint(1,100))
right(random.randint(1, 90))
craystar()
forward(random.randint(1,100))
right(random.randint(1, 90))
craytriangle()
forward(random.randint(1,100))
right(random.randint(1, 90))
craystar()
forward(random.randint(1,100))
right(random.randint(1, 90))
craytriangle()
mainloop()
|
normal
|
{
"blob_id": "92d689e5caa2d8c65f86af0f8b49b009d162a783",
"index": 7379,
"step-1": "<mask token>\n\n\ndef square():\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n mainloop()\n\n\ndef pentagon():\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef octagon():\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef house():\n rectangle(200, 450, True, 'Red')\n\n\n<mask token>\n\n\ndef sun():\n circlep(3, True, 'yellow', 'yellow')\n\n\ndef sidewalk():\n fillcolor('grey')\n begin_fill()\n left(20)\n forward(400)\n left(75)\n forward(50)\n left(105)\n forward(400)\n left(75)\n forward(50)\n end_fill()\n\n\n<mask token>\n\n\ndef craystar():\n color('red', 'yellow')\n begin_fill()\n for i in range(36):\n forward(200)\n left(170)\n end_fill()\n\n\ndef craytriangle():\n color('black', 'blue')\n begin_fill()\n i = 60\n while i > 0:\n forward(i)\n right(120)\n i -= 5\n end_fill()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef square():\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n mainloop()\n\n\ndef pentagon():\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef octagon():\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n mainloop()\n\n\ndef star():\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef house():\n rectangle(200, 450, True, 'Red')\n\n\n<mask token>\n\n\ndef sun():\n circlep(3, True, 'yellow', 'yellow')\n\n\ndef sidewalk():\n fillcolor('grey')\n begin_fill()\n left(20)\n forward(400)\n left(75)\n forward(50)\n left(105)\n forward(400)\n left(75)\n forward(50)\n end_fill()\n\n\n<mask token>\n\n\ndef craystar():\n color('red', 'yellow')\n begin_fill()\n for i in range(36):\n forward(200)\n left(170)\n end_fill()\n\n\ndef craytriangle():\n color('black', 'blue')\n begin_fill()\n i = 60\n while i > 0:\n forward(i)\n right(120)\n i -= 5\n end_fill()\n\n\ndef craysquare():\n color('green', 'Blue')\n begin_fill()\n for i in range(12):\n for i in range(4):\n forward(60)\n right(90)\n for i in range(12):\n forward(random.randint(1, 60))\n right(90)\n end_fill()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef square():\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n mainloop()\n\n\ndef pentagon():\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef octagon():\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n mainloop()\n\n\ndef star():\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n mainloop()\n\n\ndef circle():\n for i in range(370):\n forward(2)\n right(1)\n mainloop()\n\n\n<mask token>\n\n\ndef house():\n rectangle(200, 450, True, 'Red')\n\n\ndef roof():\n fillcolor('brown')\n begin_fill()\n forward(293.717)\n right(80)\n forward(293.717)\n right(140)\n forward(450)\n end_fill()\n\n\n<mask token>\n\n\ndef sun():\n circlep(3, True, 'yellow', 'yellow')\n\n\ndef sidewalk():\n fillcolor('grey')\n begin_fill()\n left(20)\n forward(400)\n left(75)\n forward(50)\n left(105)\n forward(400)\n left(75)\n forward(50)\n end_fill()\n\n\n<mask token>\n\n\ndef craystar():\n color('red', 'yellow')\n begin_fill()\n for i in range(36):\n forward(200)\n left(170)\n end_fill()\n\n\ndef craytriangle():\n color('black', 'blue')\n begin_fill()\n i = 60\n while i > 0:\n forward(i)\n right(120)\n i -= 5\n end_fill()\n\n\ndef craysquare():\n color('green', 'Blue')\n begin_fill()\n for i in range(12):\n for i in range(4):\n forward(60)\n right(90)\n for i in range(12):\n forward(random.randint(1, 60))\n right(90)\n end_fill()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef eTriangle():\n forward(100)\n right(120)\n forward(100)\n right(120)\n forward(100)\n right(120)\n mainloop()\n\n\ndef square():\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n mainloop()\n\n\ndef pentagon():\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n mainloop()\n\n\n<mask token>\n\n\ndef octagon():\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n mainloop()\n\n\ndef star():\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n mainloop()\n\n\ndef circle():\n for i in range(370):\n forward(2)\n right(1)\n mainloop()\n\n\n<mask token>\n\n\ndef door():\n rectangle(50, 100, True, 'Brown')\n penup()\n right(90)\n forward(50)\n right(90)\n forward(50)\n right(90)\n forward(10)\n circle(0.1, True, 'Black')\n\n\n<mask token>\n\n\ndef house():\n rectangle(200, 450, True, 'Red')\n\n\ndef roof():\n fillcolor('brown')\n begin_fill()\n forward(293.717)\n right(80)\n forward(293.717)\n right(140)\n forward(450)\n end_fill()\n\n\n<mask token>\n\n\ndef sun():\n circlep(3, True, 'yellow', 'yellow')\n\n\ndef sidewalk():\n fillcolor('grey')\n begin_fill()\n left(20)\n forward(400)\n left(75)\n forward(50)\n left(105)\n forward(400)\n left(75)\n forward(50)\n end_fill()\n\n\n<mask token>\n\n\ndef craystar():\n color('red', 'yellow')\n begin_fill()\n for i in range(36):\n forward(200)\n left(170)\n end_fill()\n\n\ndef craytriangle():\n color('black', 'blue')\n begin_fill()\n i = 60\n while i > 0:\n forward(i)\n right(120)\n i -= 5\n end_fill()\n\n\ndef craysquare():\n color('green', 'Blue')\n begin_fill()\n for i in range(12):\n for i in range(4):\n forward(60)\n right(90)\n for i in range(12):\n forward(random.randint(1, 60))\n right(90)\n end_fill()\n\n\n<mask token>\n",
"step-5": "from turtle import *\nfrom shapes import *\n#1-\n #1.triangle\ndef eTriangle():\n forward(100)\n right(120)\n forward(100)\n right(120)\n forward(100)\n right(120)\n mainloop()\n #2.square\ndef square():\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n right(90)\n forward(100)\n mainloop()\n #3.pentagon\ndef pentagon():\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n right(72)\n forward(100)\n mainloop()\n #4.hexagon\ndef hexagon():\n forward(100)\n right(60)\n forward(100)\n right(60)\n forward(100)\n right(60)\n forward(100)\n right(60)\n forward(100)\n right(60)\n forward(100)\n mainloop()\n #5.octagon\ndef octagon():\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n right(45)\n forward(100)\n mainloop()\n #6.star\ndef star():\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n right(144)\n forward(100)\n mainloop()\n #7.circle\ndef circle():\n for i in range(370):\n forward(2)\n right(1)\n mainloop()\n\n#2- \nfrom shapes import *\neTriangle()\nsquare()\npentagon()\nhexagon()\noctagon()\nstar()\ncircle()\nmainloop()\n\n#3- \nbgcolor(\"MidnightBlue\")\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (20)\nforward(100)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (30)\nforward(150)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (40)\nforward(200)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (50)\nforward(250)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (60)\nforward(300)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\n\nforward(100)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\n\nforward(100)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\n\nleft (90)\nforward(300)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright (50)\nforward (300)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\nright(50)\nforward(300)\n\nstarp(20, True, \"yellow\", \"MidnightBlue\")\n\nright (50)\n\nforward (275)\n\ncirclep(3, True, \"SlateGrey\", \"MidnightBlue\")\nright(60)\nforward(20)\n\n\n\nmainloop()\n\n\n#4- \nbgcolor(\"skyblue\")\nright(90)\npenup()\nforward(100)\nright(90)\nforward(200)\n\nfillcolor(\"Green\")\nbegin_fill()\nforward (300)\nleft(90)\nforward (300)\nleft(90)\nforward(1250)\nleft(90)\nforward(300)\nleft(90)\nforward(1000)\nend_fill()\n\nright (90)\n\npendown()\nrectangle(200, 450, True, \"Red\")\nleft(180)\nforward(200)\nleft(90)\npenup()\nforward(100)\nright(90)\npendown\nrectangle(50, 100, True, \"Brown\")\n\npenup()\nright(90)\nforward(50)\nright(90)\nforward(50)\nright (90)\nforward(10)\n\ncircle(.1, True, \"Black\")\n\npenup()\nforward(40)\nleft(90)\nforward(50)\n\npendown()\nfillcolor(\"grey\")\nbegin_fill()\nleft (20)\nforward(400)\nleft (75)\nforward(50)\nleft(105)\nforward(400)\nleft(75)\nforward(50)\nend_fill()\n\nright(5)\npenup()\nforward(200)\nright(90)\nforward(200)\nright(90)\nleft(40)\npendown()\nfillcolor(\"brown\")\nbegin_fill()\nforward(293.717)\nright(80)\nforward(293.717)\nright(140)\nforward(450)\nend_fill()\n\npenup()\nleft(90)\nforward(75)\nleft(90)\nforward(75)\npendown()\nsquare(50, True, \"blue\", \"Black\")\nright(90)\nsquare(25, False, \"blue\", \"black\")\nright(90)\nforward(50)\nright(90)\nforward(25)\nsquare(25, False, \"blue\", \"black\")\n\npenup()\nleft(90)\nforward(25)\nright(90)\nforward(200)\npendown()\nsquare(50, True, \"blue\", 
\"Black\")\nright(90)\nsquare(25, False, \"blue\", \"black\")\nright(90)\nforward(50)\nright(90)\nforward(25)\nsquare(25, False, \"blue\", \"black\")\n\npenup()\nleft(90)\nforward(250)\nleft (90)\nforward(400)\n\ncirclep(3, True, \"yellow\", \"yellow\")\nmainloop()\n\n#5- \n\ndef door():\n rectangle(50, 100, True, \"Brown\")\n penup()\n right(90)\n forward(50)\n right(90)\n forward(50)\n right (90)\n forward(10)\n\n circle(.1, True, \"Black\")\n\n\ndef grass():\n fillcolor(\"Green\")\n begin_fill()\n forward (300)\n left(90)\n forward (300)\n left(90)\n forward(1250)\n left(90)\n forward(300)\n left(90)\n forward(1000)\n end_fill()\n\ndef house():\n rectangle(200, 450, True, \"Red\")\n \n\ndef roof():\n fillcolor(\"brown\")\n begin_fill()\n forward(293.717)\n right(80)\n forward(293.717)\n right(140)\n forward(450)\n end_fill()\n\ndef window():\n square(50, True, \"blue\", \"Black\")\n right(90)\n square(25, False, \"blue\", \"black\")\n right(90)\n forward(50)\n right(90)\n forward(25)\n square(25, False, \"blue\", \"black\")\n\ndef sun():\n circlep(3, True, \"yellow\", \"yellow\")\n\ndef sidewalk():\n fillcolor(\"grey\")\n begin_fill()\n left (20)\n forward(400)\n left (75)\n forward(50)\n left(105)\n forward(400)\n left(75)\n forward(50)\n end_fill()\n\n\n\n\n\n\n\n\nbgcolor(\"skyblue\")\nright(90)\npenup()\nforward(100)\nright(90)\nforward(200)\n\ngrass()\n\nright (90)\npendown()\n\nhouse()\n\nleft(180)\nforward(200)\nleft(90)\npenup()\nforward(100)\nright(90)\npendown\n\ndoor()\n\npenup()\nforward(40)\nleft(90)\nforward(50)\npendown()\n\nsidewalk()\n\nright(5)\npenup()\nforward(200)\nright(90)\nforward(200)\nright(90)\nleft(40)\npendown()\n\nroof()\n\npenup()\nleft(90)\nforward(75)\nleft(90)\nforward(75)\n\npendown()\n\nwindow()\n\npenup()\nleft(90)\nforward(25)\nright(90)\nforward(200)\npendown()\n\nwindow()\n\npenup()\nleft(90)\nforward(250)\nleft (90)\nforward(400)\n\nsun()\nmainloop()\n\n#6- \nimport random\n\ndef craystar():\n color('red', 'yellow')\n begin_fill()\n for i in range(36):\n forward(200)\n left(170)\n end_fill() \n\ndef craytriangle():\n color('black', 'blue')\n begin_fill()\n i = 60\n \n while i > 0:\n forward(i)\n right(120)\n i -= 5\n end_fill()\n\ndef craysquare():\n color(\"green\", \"Blue\")\n begin_fill()\n for i in range(12):\n for i in range(4):\n forward(60)\n right(90)\n for i in range(12):\n forward (random.randint(1,60))\n right(90)\n end_fill()\n\n\ncraysquare()\nforward (50)\ncraysquare()\nforward (50)\ncraysquare()\nforward (50)\ncraystar()\nforward(random.randint(1,100))\nright(random.randint(1, 90))\ncraytriangle()\nforward(random.randint(1,100))\nright(random.randint(1, 90))\ncraystar()\nforward(random.randint(1,100))\nright(random.randint(1, 90))\ncraytriangle()\nforward(random.randint(1,100))\nright(random.randint(1, 90))\ncraystar()\nforward(random.randint(1,100))\nright(random.randint(1, 90))\ncraytriangle()\nmainloop()",
"step-ids": [
8,
10,
12,
14,
20
]
}
|
[
8,
10,
12,
14,
20
] |
"""Test the various means of instantiating and invoking filters."""
import types
import test
test.prefer_parent_path()
import cherrypy
from cherrypy import filters
from cherrypy.filters.basefilter import BaseFilter
class AccessFilter(BaseFilter):
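    """Raise 401 when access_filter.on is enabled and the request carries no login."""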
def before_request_body(self):
if not cherrypy.config.get("access_filter.on", False):
return
if not getattr(cherrypy.request, "login", None):
raise cherrypy.HTTPError(401)
def setup_server():
class Numerify(BaseFilter):
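        # Replaces configured substrings in the response body; the test
        # config below maps "pie" -> "3.14159".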
def on_start_resource(self):
m = cherrypy.config.get("numerify_filter.map", {})
cherrypy.request.numerify_map = m.items()
def before_finalize(self):
if not cherrypy.config.get("numerify_filter.on", False):
return
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
# It's not mandatory to inherit from BaseFilter.
class NadsatFilter:
def __init__(self):
self.counter = 0
self.ended = {}
def before_main(self):
cherrypy.request.counter = self.counter = self.counter + 1
self.ended[cherrypy.request.counter] = False
def before_finalize(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace("good", "horrorshow")
chunk = chunk.replace("piece", "lomtick")
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
def on_end_request(self):
# This runs after the request has been completely written out.
cherrypy.response.body = "razdrez"
self.ended[cherrypy.request.counter] = True
class Root:
def index(self):
return "Howdy earth!"
index.exposed = True
cherrypy.root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of cherrypy.root.
"""
def __init__(cls, name, bases, dct):
            type.__init__(cls, name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(cherrypy.root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class CPFilterList(Test):
# METHOD ONE:
# Use _cp_filters (old name: _cpFilterList)
_cp_filters = [NadsatFilter()]
def index(self):
return "A good piece of cherry pie"
def ended(self, id):
return repr(self._cp_filters[0].ended[int(id)])
def err(self):
raise ValueError()
def errinstream(self):
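            # The unreachable yield turns this into a generator, so the
            # ValueError is only raised while the body is streaming and the
            # 200 status line has already been sent.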
raise ValueError()
yield "confidential"
def restricted(self):
return "Welcome!"
def err_in_onstart(self):
return "success!"
cherrypy.config.update({
'global': {
# METHOD TWO:
# Declare a classname in server.input_filters.
'server.input_filters': ["cherrypy.test.test_custom_filters.AccessFilter"],
'server.log_to_screen': False,
'server.environment': 'production',
'server.show_tracebacks': True,
},
'/cpfilterlist': {
'numerify_filter.on': True,
'numerify_filter.map': {"pie": "3.14159"}
},
'/cpfilterlist/restricted': {
'access_filter.on': True,
'server.show_tracebacks': False,
},
'/cpfilterlist/errinstream': {
'stream_response': True,
},
'/cpfilterlist/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'numerify_filter.map': "pie->3.14159"
},
})
# METHOD THREE:
# Insert a class directly into the filters.output_filters chain.
# You can also insert a string, but we're effectively testing
# using-a-string via the config file.
filters.input_filters.insert(0, Numerify)
filters.output_filters.insert(0, Numerify)
# We have to call filters.init() here (if we want methods #2 and #3
# to work), because the test suite may already have run server.start()
# (which is where filters.init() is usually called).
filters.init()
# Client-side code #
import helper
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage("/cpfilterlist/")
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody("A horrorshow lomtick of cherry 3.14159")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/1")
self.assertBody("True")
valerr = '\n raise ValueError()\nValueError'
self.getPage("/cpfilterlist/err")
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(500, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/3")
self.assertBody("True")
# If body is "razdrez", then on_end_request is being called too early.
self.getPage("/cpfilterlist/errinstream")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus("200 OK")
self.assertBody("Unrecoverable error in the server.")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/5")
self.assertBody("True")
# Test the config method.
self.getPage("/cpfilterlist/restricted")
self.assertErrorPage(401)
def testGuaranteedFilters(self):
# The on_start_resource and on_end_request filter methods are all
# guaranteed to run, even if there are failures in other on_start
# or on_end methods. This is NOT true of the other filter methods.
# Here, we have set up a failure in NumerifyFilter.on_start_resource,
# but because that failure is logged and passed over, the error
# page we obtain in the user agent should be from before_finalize.
self.getPage("/cpfilterlist/err_in_onstart")
self.assertErrorPage(500)
self.assertInBody("AttributeError: 'Request' object has no "
"attribute 'numerify_map'")
if __name__ == '__main__':
setup_server()
helper.testmain()
|
normal
|
{
"blob_id": "8a412231c13df1b364b6e2a27549730d06048186",
"index": 9978,
"step-1": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n <mask token>\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-3": "<mask token>\ntest.prefer_parent_path()\n<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the 
server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-4": "<mask token>\nimport types\nimport test\ntest.prefer_parent_path()\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n 
self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-5": "\"\"\"Test the various means of instantiating and invoking filters.\"\"\"\n\nimport types\nimport test\ntest.prefer_parent_path()\n\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n \n def before_request_body(self):\n if not cherrypy.config.get(\"access_filter.on\", False):\n return\n \n if not getattr(cherrypy.request, \"login\", None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n class Numerify(BaseFilter):\n \n def on_start_resource(self):\n m = cherrypy.config.get(\"numerify_filter.map\", {})\n cherrypy.request.numerify_map = m.items()\n \n def before_finalize(self):\n if not cherrypy.config.get(\"numerify_filter.on\", False):\n return\n \n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n \n \n # It's not mandatory to inherit from BaseFilter.\n class NadsatFilter:\n \n def __init__(self):\n self.counter = 0\n self.ended = {}\n \n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n \n def before_finalize(self):\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace(\"good\", \"horrorshow\")\n chunk = chunk.replace(\"piece\", \"lomtick\")\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n \n def on_end_request(self):\n # This runs after the request has been completely written out.\n cherrypy.response.body = \"razdrez\"\n self.ended[cherrypy.request.counter] = True\n\n\n\n class Root:\n def index(self):\n return \"Howdy earth!\"\n index.exposed = True\n\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n \n # METHOD ONE:\n # Use _cp_filters (old name: _cpFilterList)\n _cp_filters = [NadsatFilter()]\n \n def index(self):\n return \"A good piece of cherry pie\"\n \n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n \n def err(self):\n raise ValueError()\n \n def errinstream(self):\n raise ValueError()\n yield \"confidential\"\n \n def restricted(self):\n return \"Welcome!\"\n \n def err_in_onstart(self):\n return \"success!\"\n\n\n cherrypy.config.update({\n 'global': {\n # METHOD TWO:\n # Declare a classname in server.input_filters.\n 'server.input_filters': [\"cherrypy.test.test_custom_filters.AccessFilter\"],\n 'server.log_to_screen': False,\n 'server.environment': 'production',\n 'server.show_tracebacks': True,\n },\n '/cpfilterlist': {\n 'numerify_filter.on': True,\n 'numerify_filter.map': {\"pie\": \"3.14159\"}\n },\n '/cpfilterlist/restricted': {\n 'access_filter.on': True,\n 'server.show_tracebacks': False,\n },\n '/cpfilterlist/errinstream': {\n 'stream_response': True,\n },\n '/cpfilterlist/err_in_onstart': {\n # Because this isn't a dict, on_start_resource will error.\n 'numerify_filter.map': \"pie->3.14159\"\n },\n })\n\n # METHOD THREE:\n # Insert a class directly into the filters.output_filters chain.\n # You can also 
insert a string, but we're effectively testing\n # using-a-string via the config file.\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n\n # We have to call filters.init() here (if we want methods #2 and #3\n # to work), because the test suite may already have run server.start()\n # (which is where filters.init() is usually called).\n filters.init()\n\n\n# Client-side code #\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n \n def testCPFilterList(self):\n self.getPage(\"/cpfilterlist/\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertBody(\"A horrorshow lomtick of cherry 3.14159\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/1\")\n self.assertBody(\"True\")\n \n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage(\"/cpfilterlist/err\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertErrorPage(500, pattern=valerr)\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/3\")\n self.assertBody(\"True\")\n \n # If body is \"razdrez\", then on_end_request is being called too early.\n self.getPage(\"/cpfilterlist/errinstream\")\n # Because this error is raised after the response body has\n # started, the status should not change to an error status.\n self.assertStatus(\"200 OK\")\n self.assertBody(\"Unrecoverable error in the server.\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/5\")\n self.assertBody(\"True\")\n \n # Test the config method.\n self.getPage(\"/cpfilterlist/restricted\")\n self.assertErrorPage(401)\n \n def testGuaranteedFilters(self):\n # The on_start_resource and on_end_request filter methods are all\n # guaranteed to run, even if there are failures in other on_start\n # or on_end methods. This is NOT true of the other filter methods.\n # Here, we have set up a failure in NumerifyFilter.on_start_resource,\n # but because that failure is logged and passed over, the error\n # page we obtain in the user agent should be from before_finalize.\n self.getPage(\"/cpfilterlist/err_in_onstart\")\n self.assertErrorPage(500)\n self.assertInBody(\"AttributeError: 'Request' object has no \"\n \"attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
# -*- coding: utf-8 -*-
"""Test custom node separator."""

import six
from helper import assert_raises, eq_

import anytree as at


class MyNode(at.Node):

    separator = "|"


def test_render():
    """Render string cast."""
    root = MyNode("root")
    s0 = MyNode("sub0", parent=root)
    MyNode("sub0B", parent=s0)
    MyNode("sub0A", parent=s0)
    MyNode("sub1", parent=root)
    r = at.RenderTree(root)

    expected = "\n".join(
        [
            "MyNode('|root')",
            "├── MyNode('|root|sub0')",
            "│   ├── MyNode('|root|sub0|sub0B')",
            "│   └── MyNode('|root|sub0|sub0A')",
            "└── MyNode('|root|sub1')",
        ]
    )
    if six.PY2:
        eq_(str(r).decode("utf-8"), expected)
    else:
        eq_(str(r), expected)


def test_get():
    """Get."""
    top = MyNode("top", parent=None)
    sub0 = MyNode("sub0", parent=top)
    sub0sub0 = MyNode("sub0sub0", parent=sub0)
    sub0sub1 = MyNode("sub0sub1", parent=sub0)
    sub1 = MyNode("sub1", parent=top)
    r = at.Resolver("name")
    eq_(r.get(top, "sub0|sub0sub0"), sub0sub0)
    eq_(r.get(sub1, ".."), top)
    eq_(r.get(sub1, "..|sub0|sub0sub1"), sub0sub1)
    eq_(r.get(sub1, "."), sub1)
    eq_(r.get(sub1, ""), sub1)
    with assert_raises(at.ChildResolverError, "MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
        r.get(top, "sub2")
    eq_(r.get(sub0sub0, "|top"), top)
    eq_(r.get(sub0sub0, "|top|sub0"), sub0)
    with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
        r.get(sub0sub0, "|")
    with assert_raises(at.ResolverError, "unknown root node '|bar'. root is '|top'."):
        r.get(sub0sub0, "|bar")


def test_glob():
    """Wildcard."""
    top = MyNode("top", parent=None)
    sub0 = MyNode("sub0", parent=top)
    sub0sub0 = MyNode("sub0", parent=sub0)
    sub0sub1 = MyNode("sub1", parent=sub0)
    sub0sub1sub0 = MyNode("sub0", parent=sub0sub1)
    MyNode("sub1", parent=sub0sub1)
    sub1 = MyNode("sub1", parent=top)
    sub1sub0 = MyNode("sub0", parent=sub1)
    r = at.Resolver()
    eq_(r.glob(top, "*|*|sub0"), [sub0sub1sub0])

    eq_(r.glob(top, "sub0|sub?"), [sub0sub0, sub0sub1])
    eq_(r.glob(sub1, "..|.|*"), [sub0, sub1])
    eq_(r.glob(top, "*|*"), [sub0sub0, sub0sub1, sub1sub0])
    eq_(r.glob(top, "*|sub0"), [sub0sub0, sub1sub0])
    with assert_raises(at.ChildResolverError, "MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
        r.glob(top, "sub1|sub1")
|
normal
|
{
"blob_id": "a430b4629ee06dbfb267f839599383624e37451e",
"index": 4582,
"step-1": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-3": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-4": "<mask token>\nimport six\nfrom helper import assert_raises, eq_\nimport anytree as at\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test custom node separator.\"\"\"\n\nimport six\nfrom helper import assert_raises, eq_\n\nimport anytree as at\n\n\nclass MyNode(at.Node):\n\n separator = \"|\"\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode(\"root\")\n s0 = MyNode(\"sub0\", parent=root)\n MyNode(\"sub0B\", parent=s0)\n MyNode(\"sub0A\", parent=s0)\n MyNode(\"sub1\", parent=root)\n r = at.RenderTree(root)\n\n expected = \"\\n\".join(\n [\n \"MyNode('|root')\",\n \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\",\n \"└── MyNode('|root|sub1')\",\n ]\n )\n if six.PY2:\n eq_(str(r).decode(\"utf-8\"), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub0sub1\", parent=sub0)\n sub1 = MyNode(\"sub1\", parent=top)\n r = at.Resolver(\"name\")\n eq_(r.get(top, \"sub0|sub0sub0\"), sub0sub0)\n eq_(r.get(sub1, \"..\"), top)\n eq_(r.get(sub1, \"..|sub0|sub0sub1\"), sub0sub1)\n eq_(r.get(sub1, \".\"), sub1)\n eq_(r.get(sub1, \"\"), sub1)\n with assert_raises(at.ChildResolverError, \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, \"sub2\")\n eq_(r.get(sub0sub0, \"|top\"), top)\n eq_(r.get(sub0sub0, \"|top|sub0\"), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, \"|\")\n with assert_raises(at.ResolverError, \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, \"|bar\")\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub1\", parent=sub0)\n sub0sub1sub0 = MyNode(\"sub0\", parent=sub0sub1)\n MyNode(\"sub1\", parent=sub0sub1)\n sub1 = MyNode(\"sub1\", parent=top)\n sub1sub0 = MyNode(\"sub0\", parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, \"*|*|sub0\"), [sub0sub1sub0])\n\n eq_(r.glob(top, \"sub0|sub?\"), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, \"..|.|*\"), [sub0, sub1])\n eq_(r.glob(top, \"*|*\"), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, \"*|sub0\"), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError, \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, \"sub1|sub1\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from .base import BaseEngine
import re


class YandexSearch(BaseEngine):
    base_url = "https://yandex.com"
    search_url = "https://yandex.com/search/"

    def get_params(self, query, **params):
        params["text"] = query
        params["p"] = None
        return params

    def next_url(self, soup):
        if (regex := re.findall(r'"(/search/\?[^>]+p=[^"]+)', str(soup))):
            return self.base_url + regex[-1]

    def parse_soup(self, soup):
        for raw in soup.find_all('li', class_="serp-item"):
            if (url := raw.a.get("href")):
                yield url

    def captcha(self, response):
        return "showcaptcha" in response.url
|
normal
|
{
"blob_id": "0ec3ca0f952dbc09c7a7a3e746c0aeab28ee9834",
"index": 6498,
"step-1": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n <mask token>\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-2": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-3": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-4": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-5": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = \"https://yandex.com\"\n search_url = \"https://yandex.com/search/\"\n\n def get_params(self, query, **params):\n params[\"text\"] = query\n params[\"p\"] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall(r'\"(/search/\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_=\"serp-item\"):\n if (url := raw.a.get(\"href\")):\n yield url\n\n def captcha(self, response):\n return \"showcaptcha\" in response.url\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#Web Scraping
#Make sure you have bs4, webbrowser and requests installed as your third party modules

import bs4, webbrowser, requests

try:
    data = requests.get("http://en.wikipedia.org/wiki/Python")
    data.raise_for_status()

    my_data = bs4.BeautifulSoup(data.text, "lxml")

    print("List of all the header tags: \n\n")
    for the_data in my_data.find_all("a"):
        try:
            print(the_data.attrs["href"])
        except Exception as err:
            print(err)

except Exception as err:
    print(err)
    print("\nNo website matches your search.")
|
normal
|
{
"blob_id": "27e9635adf6109f3ab13b9d8dd5809973b61ca03",
"index": 413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n data = requests.get('http://en.wikipedia.org/wiki/Python')\n data.raise_for_status()\n my_data = bs4.BeautifulSoup(data.text, 'lxml')\n print('List of all the header tags: \\n\\n')\n for the_data in my_data.find_all('a'):\n try:\n print(the_data.attrs['href'])\n except Exception as err:\n print(err)\nexcept Exception as err:\n print(err)\n print('\\nNo website matches your search.')\n",
"step-3": "import bs4, webbrowser, requests\ntry:\n data = requests.get('http://en.wikipedia.org/wiki/Python')\n data.raise_for_status()\n my_data = bs4.BeautifulSoup(data.text, 'lxml')\n print('List of all the header tags: \\n\\n')\n for the_data in my_data.find_all('a'):\n try:\n print(the_data.attrs['href'])\n except Exception as err:\n print(err)\nexcept Exception as err:\n print(err)\n print('\\nNo website matches your search.')\n",
"step-4": "#Web Scraping\r\n#Make sure you have bs4, webbrowser and request installed as your third party modules\r\n\r\nimport bs4, webbrowser, requests\r\n\r\ntry:\r\n data = requests.get(\"http://en.wikipedia.org/wiki/Python\")\r\n data.raise_for_status()\r\n \r\n my_data = bs4.BeautifulSoup(data.text, \"lxml\")\r\n \r\n print(\"List of all the header tags: \\n\\n\")\r\n for the_data in my_data.find_all(\"a\"):\r\n try:\r\n print(the_data.attrs[\"href\"])\r\n except Exception as err:\r\n print(err)\r\n \r\nexcept Exception as err:\r\n print(err)\r\n print(\"\\nNo website matches your search.\")\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import unicode_literals
import abc
import logging

import six
import semantic_version

from lymph.utils import observables, hash_id
from lymph.core.versioning import compatible, serialize_version


logger = logging.getLogger(__name__)

# Event types propagated by Service when instances change.
ADDED = 'ADDED'
REMOVED = 'REMOVED'
UPDATED = 'UPDATED'


class ServiceInstance(object):
    def __init__(self, id=None, identity=None, **info):
        self.id = id
        self.identity = identity if identity else hash_id(info.get('endpoint'))
        self.info = {}
        self.update(**info)

    def update(self, **info):
        version = info.pop('version', None)
        if version:
            version = semantic_version.Version(version)
        self.version = version
        self.info.update(info)

    def __getattr__(self, name):
        try:
            return self.info[name]
        except KeyError:
            raise AttributeError(name)

    def serialize(self):
        d = {
            'id': self.id,
            'identity': self.identity,
            'version': serialize_version(self.version),
        }
        d.update(self.info)
        return d


@six.add_metaclass(abc.ABCMeta)
class InstanceSet(observables.Observable):
    @abc.abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    def match_version(self, version):
        return VersionedServiceView(self, version)


class Service(InstanceSet):
    def __init__(self, name=None, instances=()):
        super(Service, self).__init__()
        self.name = name
        self.instances = {i.id: i for i in instances}
        self.version = None

    def __str__(self):
        return self.name

    def __iter__(self):
        return six.itervalues(self.instances)

    def __len__(self):
        return len(self.instances)

    def get_instance(self, prefix):
        for instance in six.itervalues(self.instances):
            if instance.id.startswith(prefix):
                return instance

    def identities(self):
        return list(self.instances.keys())

    def remove(self, instance_id):
        try:
            instance = self.instances.pop(instance_id)
        except KeyError:
            pass
        else:
            self.notify_observers(REMOVED, instance)

    def update(self, instance_id, **info):
        try:
            instance = self.instances[instance_id]
        except KeyError:
            instance = self.instances[instance_id] = ServiceInstance(**info)
            self.notify_observers(ADDED, instance)
        else:
            instance.update(**info)
            self.notify_observers(UPDATED, instance)


class VersionedServiceView(InstanceSet):
    def __init__(self, service, version):
        self.service = service
        self.spec = compatible(version)
        self.version = version

    def __str__(self):
        return '%s@%s' % (self.name, self.version)

    @property
    def name(self):
        return self.service.name

    def __iter__(self):
        for instance in self.service:
            if instance.version in self.spec:
                yield instance

    def observe(self, *args, **kwargs):
        return self.service.observe(*args, **kwargs)
|
normal
|
{
"blob_id": "ba41f2a564f46032dbf72f7d17b2ea6deaa81b10",
"index": 4332,
"step-1": "<mask token>\n\n\nclass Service(InstanceSet):\n <mask token>\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n <mask token>\n\n def identities(self):\n return list(self.instances.keys())\n <mask token>\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass ServiceInstance(object):\n <mask token>\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n <mask token>\n <mask token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n",
"step-4": "<mask token>\n\n\nclass ServiceInstance(object):\n\n def __init__(self, id=None, identity=None, **info):\n self.id = id\n self.identity = identity if identity else hash_id(info.get('endpoint'))\n self.info = {}\n self.update(**info)\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n <mask token>\n\n def serialize(self):\n d = {'id': self.id, 'identity': self.identity, 'version':\n serialize_version(self.version)}\n d.update(self.info)\n return d\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n",
"step-5": "from __future__ import unicode_literals\nimport abc\nimport logging\n\nimport six\nimport semantic_version\n\nfrom lymph.utils import observables, hash_id\nfrom lymph.core.versioning import compatible, serialize_version\n\n\nlogger = logging.getLogger(__name__)\n\n# Event types propagated by Service when instances change.\nADDED = 'ADDED'\nREMOVED = 'REMOVED'\nUPDATED = 'UPDATED'\n\n\nclass ServiceInstance(object):\n def __init__(self, id=None, identity=None, **info):\n self.id = id\n self.identity = identity if identity else hash_id(info.get('endpoint'))\n self.info = {}\n self.update(**info)\n\n def update(self, **info):\n version = info.pop('version', None)\n if version:\n version = semantic_version.Version(version)\n self.version = version\n self.info.update(info)\n\n def __getattr__(self, name):\n try:\n return self.info[name]\n except KeyError:\n raise AttributeError(name)\n\n def serialize(self):\n d = {\n 'id': self.id,\n 'identity': self.identity,\n 'version': serialize_version(self.version),\n }\n d.update(self.info)\n return d\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass InstanceSet(observables.Observable):\n @abc.abstractmethod\n def __iter__(self):\n raise NotImplementedError()\n\n def match_version(self, version):\n return VersionedServiceView(self, version)\n\n\nclass Service(InstanceSet):\n def __init__(self, name=None, instances=()):\n super(Service, self).__init__()\n self.name = name\n self.instances = {i.id: i for i in instances}\n self.version = None\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n return six.itervalues(self.instances)\n\n def __len__(self):\n return len(self.instances)\n\n def get_instance(self, prefix):\n for instance in six.itervalues(self.instances):\n if instance.id.startswith(prefix):\n return instance\n\n def identities(self):\n return list(self.instances.keys())\n\n def remove(self, instance_id):\n try:\n instance = self.instances.pop(instance_id)\n except KeyError:\n pass\n else:\n self.notify_observers(REMOVED, instance)\n\n def update(self, instance_id, **info):\n try:\n instance = self.instances[instance_id]\n except KeyError:\n instance = self.instances[instance_id] = ServiceInstance(**info)\n self.notify_observers(ADDED, instance)\n else:\n instance.update(**info)\n self.notify_observers(UPDATED, instance)\n\n\nclass VersionedServiceView(InstanceSet):\n def __init__(self, service, version):\n self.service = service\n self.spec = compatible(version)\n self.version = version\n\n def __str__(self):\n return '%s@%s' % (self.name, self.version)\n\n @property\n def name(self):\n return self.service.name\n\n def __iter__(self):\n for instance in self.service:\n if instance.version in self.spec:\n yield instance\n\n def observe(self, *args, **kwargs):\n return self.service.observe(*args, **kwargs)\n",
"step-ids": [
12,
18,
20,
22,
26
]
}
|
[
12,
18,
20,
22,
26
] |
import os, sys
import json
import paramiko

"""
    Copies the credentials.json file locally from robot
"""
def copy_credentials_file(hostname, username, password, src_path, dst_path):
    # create ssh connection
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(hostname=hostname, username=username, password=password)

    # ftp file from robot to local path
    ftp_client = ssh_client.open_sftp()
    ftp_client.get(src_path, dst_path)
    ftp_client.close()


"""
    Creates a default config file for AWS
    (aws_config.json)
"""
def create_default_config(path):
    data = {}
    data['method'] = 'GET'
    data['service'] = 'ec2'
    data['host'] = 'ec2.amazonaws.com'
    data['region'] = 'us-east-1'
    data['endpoint'] = 'https://ec2.amazonaws.com'

    with open(path, 'w+') as file:
        json.dump(data, file)


"""
    Checks for the aws_config.json file,
    creates the file and populates with default values
    if not found.
"""
def check_aws_config():
    config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')

    if not os.path.exists(config_path):
        print("\nCreating default AWS config...")
        create_default_config(config_path)
        print("Done.\n")

    return config_path


"""
    Checks for the credentials.json file,
    creates the file and populates with values from
    robot if not found.
"""
def check_credentials():
    login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
    login_data = load_json(login_file)

    robot_name = login_data['robot_name']
    username = login_data['username']
    password = login_data['password']

    src_path = '/var/jibo/credentials.json'
    dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')

    if not os.path.exists(dst_path):
        print("\nGrabbing AWS credentials from robot...")
        copy_credentials_file(robot_name, username, password, src_path, dst_path)
        print("Done.\n")

    return dst_path


"""
    Reads and returns contents of JSON file
"""
def load_json(path):
    with open(path, 'r') as file:
        data = json.load(file)
    return data
|
normal
|
{
"blob_id": "27f162f2e350fdb284740bd67f4293535f0ab593",
"index": 8451,
"step-1": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-4": "import os, sys\nimport json\nimport paramiko\n<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-5": "import os, sys\nimport json\nimport paramiko\n\n\"\"\"\n\tCopies the credentials.json file locally from robot\n\"\"\"\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n\t# create ssh connection\n\tssh_client = paramiko.SSHClient()\n\tssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\tssh_client.connect(hostname=hostname, username=username, password=password)\n\n\t# ftp file from robot to local path\n\tftp_client = ssh_client.open_sftp()\n\tftp_client.get(src_path, dst_path)\n\tftp_client.close()\n\n\n\"\"\"\n\tCreates a default config file for AWS\n\t(aws_config.json)\n\"\"\"\ndef create_default_config(path):\n\tdata = {}\n\tdata['method'] = 'GET'\n\tdata['service'] = 'ec2'\n\tdata['host'] = 'ec2.amazonaws.com'\n\tdata['region'] = 'us-east-1'\n\tdata['endpoint'] = 'https://ec2.amazonaws.com'\n\n\twith open(path, 'w+') as file:\n\t\tjson.dump(data, file)\n\n\n\"\"\"\n\tChecks for the aws_config.json file,\n\tcreates the file and populates with default values\n\tif not found.\n\"\"\"\ndef check_aws_config():\n\tconfig_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n\n\tif not os.path.exists(config_path):\n\t\tprint(\"\\nCreating default AWS config...\")\n\t\tcreate_default_config(config_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn config_path\n\n\n\"\"\"\n\tChecks for the credentials.json file,\n\tcreates the file and populates with values from\n\trobot if not found.\n\"\"\"\ndef check_credentials():\n\tlogin_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n\tlogin_data = load_json(login_file)\n\n\trobot_name = login_data['robot_name']\n\tusername = login_data['username']\n\tpassword = login_data['password']\n\t\n\tsrc_path = '/var/jibo/credentials.json'\n\tdst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n\n\tif not os.path.exists(dst_path):\n\t\tprint(\"\\nGrabbing AWS credentials from robot...\")\n\t\tcopy_credentials_file(robot_name, username, password, src_path, dst_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn dst_path\n\n\n\"\"\"\n\tReads and returns contents of JSON file\n\"\"\"\ndef load_json(path):\n\twith open(path, 'r') as file:\n\t\tdata = json.load(file)\n\treturn data\n\n\t",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from OpenSSL import SSL, crypto
from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol

import os
from time import time

class Echo(Protocol):

    def dataReceived(self, data):
        print "Data received: " + data

        # define cases
        options = {
            "generate": self.generateCertificate,
            "sign": self.signCertificate
        }

        tmp = data.split(';')
        method = tmp.pop(0)
        print "method is " + method

        #TODO: catch unknown cases
        # delegate case to method
        result = options[method](tmp)

        self.transport.write(result)

    def generateCertificate(self, userDataList):
        # generate a key-pair with RSA and 2048 bits
        pkey = crypto.PKey()
        pkey.generate_key(crypto.TYPE_RSA, 2048)

        # create a new certificate of x509 structure
        x509 = crypto.X509()

        # X509Name type
        subject = self.setSubject(x509.get_subject(), userDataList)
        #x509.set_subject(subject)

        # list of (name, value) tuples
        subComponents = subject.get_components()
        for (name, value) in subComponents:
            print name + " is " + value

        # cert is valid immediately
        x509.gmtime_adj_notBefore(0)

        # cert gets invalid after 10 years
        x509.gmtime_adj_notAfter(10*365*24*60*60)

        #TODO: load our CA root cert(PKCS12 type) and set subject as issuer
        # set issuer (CA) data
        x509.set_issuer(x509.get_subject())
        print "Issuer set - ACTUALLY SELF-SIGNED MODE!!!"

        # set user public key
        x509.set_pubkey(pkey)

        #TODO: which algorithm to use? (replace with sha512)
        #TODO: replace key with CA private key
        # sign the certificate
        x509.sign(pkey, 'sha256')
        print "Certificate signed - ACTUALLY SELF-SIGNED MODE!!!"

        # create a new PKCS12 object
        pkcs12 = crypto.PKCS12()

        # set the new user certificate
        pkcs12.set_certificate(x509)

        # insert user private key
        pkcs12.set_privatekey(pkey)

        # create a dump of PKCS12 and return
        return pkcs12.export()

    def setSubject(self, subject, data):
        #subjectVariables = {
        #    "C": subject.C,
        #    "ST": subject.ST,
        #    "L": subject.L,
        #    "O": subject.O,
        #    "OU": subject.OU,
        #    "CN": subject.CN
        #}

        for d in data:
            s = d.split('=')
            variable = s[0]
            value = s[1]
            print "Setting variable " + variable + " to " + value + " on subject"
            #subjectVariables[variable] = value
            if variable == "C":
                subject.C = value
            elif variable == "ST":
                subject.ST = value
            elif variable == "L":
                subject.L = value
            elif variable == "O":
                subject.O = value
            elif variable == "OU":
                subject.OU = value
            elif variable == "CN":
                subject.CN = value

        return subject

    def signCertificate(self, certData):

        x509 = crypto.X509()
        pkcs12 = crypto.load_pkcs12(certData)
        req = pkcs12.get_certificate()
        x509.set_subject(req.get_subject())
        x509.set_pubkey(req.get_pubkey())

        # set issuer from file

        # cert is valid immediately
        x509.gmtime_adj_notBefore(0)

        # cert gets invalid after 10 years
        x509.gmtime_adj_notAfter(10*365*24*60*60)

        # NOTE: 'pkey' is undefined in this scope; presumably the CA private
        # key belongs here (see the TODOs in generateCertificate).
        x509.sign(pkey, 'sha256')

        pkcs12.set_certificate(x509)

        return pkcs12.export()


def verifyCallback(connection, x509, errnum, errdepth, ok):
    if not ok:
        print 'invalid cert from subject:', x509.get_subject()
        return False
    else:
        print "Certs are fine", x509.get_subject()
        return True

def getTimestamp():
    return str(int(round(time() * 1000)))

def addTimestamp(millis, name):
    print millis + '_' + name

if __name__ == '__main__':
    factory = Factory()
    factory.protocol = Echo

    os.system("echo 'Server started...'")

    myContextFactory = ssl.DefaultOpenSSLContextFactory(
        'keys/ca-key.pem', 'keys/ca-root.pem'
    )

    ctx = myContextFactory.getContext()

    # SSL.VERIFY_PEER: require verification of the peer's SSL cert (default=true)
    # VERIFY_FAIL_IF_NO_PEER_CERT: the handshake is aborted if the connection
    # is attempted without a certificate (requires the parameter above!)
    ctx.set_verify(
        SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
        verifyCallback
    )

    # Since we have self-signed certs we have to explicitly
    # tell the server to trust them.
    ctx.load_verify_locations("keys/ca-root.pem")

    reactor.listenSSL(8000, factory, myContextFactory)
    reactor.run()
|
normal
|
{
"blob_id": "9951588f581c5045154a77535b36d230d586d8a5",
"index": 338,
"step-1": "from OpenSSL import SSL, crypto\nfrom twisted.internet import ssl, reactor\nfrom twisted.internet.protocol import Factory, Protocol\n\nimport os\nfrom time import time\n\nclass Echo(Protocol):\n\n def dataReceived(self, data):\n print \"Data received: \" + data\n\n # define cases\n options = {\n \"generate\": self.generateCertificate,\n \"sign\": self.signCertificate\n }\n \n tmp = data.split(';')\n method = tmp.pop(0)\n print \"method is \" + method\n \n #TODO: catch unknown cases\n # delegate case to method\n result = options[method](tmp)\n \n self.transport.write(result)\n\n def generateCertificate(self, userDataList):\n # generate a key-pair with RSA and 2048 bits\n pkey = crypto.PKey()\n pkey.generate_key(crypto.TYPE_RSA, 2048)\n \n # create a new certificate of x509 structure\n x509 = crypto.X509()\n \n # X509Name type\n subject = self.setSubject(x509.get_subject(), userDataList)\n #x509.set_subject(subject)\n \n # list of (name, value) tuples\n subComponents = subject.get_components()\n for (name, value) in subComponents:\n print name + \" is \" + value\n \n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n \n #TODO: load our CA root cert(PKCS12 type) and set subject as issuer\n # set issuer (CA) data\n x509.set_issuer(x509.get_subject())\n print \"Issuer set - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # set user public key\n x509.set_pubkey(pkey)\n \n #TODO: which algorithm to use? (replace with sha512)\n #TODO: replace key with CA private key\n # sign the certificate\n x509.sign(pkey, 'sha256')\n print \"Certificate signed - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # create a new PKCS12 object\n pkcs12 = crypto.PKCS12()\n \n # set the new user certificate\n pkcs12.set_certificate(x509)\n \n # insert user private key\n pkcs12.set_privatekey(pkey)\n \n # create a dump of PKCS12 and return\n return pkcs12.export()\n \n def setSubject(self, subject, data):\n #subjectVariables = {\n # \"C\": subject.C,\n # \"ST\": subject.ST,\n # \"L\": subject.L,\n # \"O\": subject.O,\n # \"OU\": subject.OU,\n # \"CN\": subject.CN\n #}\n \n for d in data:\n s = d.split('=')\n variable = s[0]\n value = s[1]\n print \"Setting variable \" + variable + \" to \" + value + \" on subject\"\n #subjectVariables[variable] = value\n if variable == \"C\":\n subject.C = value\n elif variable == \"ST\":\n subject.ST = value\n elif variable == \"L\":\n subject.L = value\n elif variable == \"O\":\n subject.O = value\n elif variable == \"OU\":\n subject.OU = value\n elif variable == \"CN\":\n subject.CN = value\n \n return subject\n \n def signCertificate(self, certData):\n\n x509 = crypto.X509()\n pkcs12 = crypto.load_pkcs12(certData)\n req = pkcs12.get_certificate()\n x509.set_subject(req.get_subject())\n x509.set_pubkey(req.get_pubkey())\n\n #issuer aus Datei setzen\n\n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n\n x509.sign(pkey, 'sha256')\n\n pkcs12.set_certificate(x509)\n\n return pkcs12.export()\n \n\ndef verifyCallback(connection, x509, errnum, errdepth, ok):\n if not ok:\n print 'invalid cert from subject:', x509.get_subject()\n return False\n else:\n print \"Certs are fine\", x509.get_subject()\n return True\n\ndef getTimestamp():\n return str(int(round(time() * 1000)))\n\ndef addTimestamp(millis, name):\n print millis + '_' + name\n\nif __name__ == '__main__':\n factory = Factory()\n factory.protocol = Echo\n\n 
os.system(\"echo 'Server started...'\")\n\n myContextFactory = ssl.DefaultOpenSSLContextFactory(\n 'keys/ca-key.pem', 'keys/ca-root.pem'\n )\n\n ctx = myContextFactory.getContext()\n\n # SSL.VERIFY_PEER: Verifizierung des verwendeten SSL-Certs vorraussetzen (default=true)\n # VERIFY_FAIL_IF_NO_PEER_CERT: Vorgang wird abgebrochen, wenn die Verbindung ohne Zertifikat \n # verwendet wird (setzt obigen Parameer vorraus!)\n ctx.set_verify(\n SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n verifyCallback\n )\n\n # Since we have self-signed certs we have to explicitly\n # tell the server to trust them.\n ctx.load_verify_locations(\"keys/ca-root.pem\")\n\n reactor.listenSSL(8000, factory, myContextFactory)\n reactor.run()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.0.4 on 2020-03-11 17:48

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0003_auto_20200310_1620'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='name',
            field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),
        ),
        migrations.AddField(
            model_name='tag',
            name='slug',
            field=models.CharField(default='code', max_length=100, unique=True),
        ),
    ]
|
normal
|
{
"blob_id": "ab12468b1da20c896e3578091fd9ba245dcfa0a4",
"index": 1350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-5": "# Generated by Django 3.0.4 on 2020-03-11 17:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_auto_20200310_1620'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='name',\n field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),\n ),\n migrations.AddField(\n model_name='tag',\n name='slug',\n field=models.CharField(default='code', max_length=100, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Plot the output data.
"""
# Standard library
import os
import json
import math
import matplotlib as maplot
import matplotlib.pyplot as pyplot
from datetime import datetime
# User library
from sub.inputprocess import CONSTANTS as CONS
# **json.loads(json_data)
def get_data():
"""Read output file to get data."""
try:
with open(CONS["OUTPUT_FILE"], "r") as file:
data = json.load(file)[1]
return data
except FileNotFoundError:
print("Data file not found.")
exit()
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]["objective"]) for population in data]
# objectives = [population[0]["objective"] for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == "x":
# Use this os._exit(0) to close whole window, even when playing
os._exit(0)
if event.key == "s":
# Get time to define image's name
now = datetime.now()
current_time = now.strftime("%H-%M-%S")
plot_name = "Plot" + "-" + current_time
# Remove left title, then save image
pyplot.title("", loc="left", pad=20)
fig.savefig(
"%s%s%s"
% (
CONS["OUTPUT_PHOTO_DIRECTORY"],
plot_name,
CONS["PHOTO_TYPE"],
),
transparent=False,
dpi=300,
)
# Use this exit(0) to prevent exiting when playing the plot
# but allow closing when plotting finishes
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
# Turn on grid with dashed style
subplot.yaxis.grid(True, linestyle="dashed")
# Get list of new higher values
new_values = get_new_values(values)
# Plot 2 lines
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
# Print left plot title
pyplot.title(
"Press X to exit\nPress S to save",
loc="left",
fontsize=14,
color="#1F76B4",
style="italic",
pad=20,
)
# Print right plot title
pyplot.title(
f"{'Max objective:':>25}{max(values):>10.2E}\n"
f"{'Generation:':>25}{values.index(max(values)):>10}",
loc="right",
fontfamily="Lucida Sans Typewriter",
fontsize=12,
color="#FF7E0E",
pad=20,
)
# The following code configures some elements of the plot window
# Disable toolbar
maplot.rcParams["toolbar"] = "None"
# Set font
maplot.rcParams["font.family"] = "Candara"
maplot.rcParams["font.size"] = 12
maplot.rcParams["font.weight"] = 500
# Set window title
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title("Prosthetic Foot Design by Genetic Algorithm")
# Set icon
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS["ICON_FILE"])
# Disable some borders
subplot = fig.add_subplot(111, frameon=True)
subplot.spines["right"].set_visible(False)
subplot.spines["left"].set_visible(False)
subplot.spines["top"].set_visible(False)
# Push verticle axis to the right
subplot.yaxis.tick_right()
# Padding axis label from plot area, maybe unnecessary
subplot.tick_params(axis="y", which="major", pad=5)
subplot.tick_params(axis="x", which="major", pad=5)
# Adjust subplot size based on window size
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
# Reconize key pressed
pyplot.connect("key_press_event", on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
# Hold window
pyplot.show()
if __name__ == "__main__":
__package__ = "inputprocess"
objectives = get_objectives(get_data())
main(objectives, is_animation=CONS["IS_ANIMATION"])
|
normal
|
{
"blob_id": "f4f08015b7638f4d6ea793350d5d19a3485978cd",
"index": 53,
"step-1": "<mask token>\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\nif __name__ == '__main__':\n __package__ = 'inputprocess'\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS['IS_ANIMATION'])\n",
"step-4": "<mask token>\nimport os\nimport json\nimport math\nimport matplotlib as maplot\nimport matplotlib.pyplot as pyplot\nfrom datetime import datetime\nfrom sub.inputprocess import CONSTANTS as CONS\n\n\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS['OUTPUT_FILE'], 'r') as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print('Data file not found.')\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0]['objective']) for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n if event.key == 'x':\n os._exit(0)\n if event.key == 's':\n now = datetime.now()\n current_time = now.strftime('%H-%M-%S')\n plot_name = 'Plot' + '-' + current_time\n pyplot.title('', loc='left', pad=20)\n fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],\n plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n subplot.yaxis.grid(True, linestyle='dashed')\n new_values = get_new_values(values)\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n pyplot.title('Press X to exit\\nPress S to save', loc='left',\n fontsize=14, color='#1F76B4', style='italic', pad=20)\n pyplot.title(\n f\"\"\"{'Max objective:':>25}{max(values):>10.2E}\n{'Generation:':>25}{values.index(max(values)):>10}\"\"\"\n , loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,\n color='#FF7E0E', pad=20)\n maplot.rcParams['toolbar'] = 'None'\n maplot.rcParams['font.family'] = 'Candara'\n maplot.rcParams['font.size'] = 12\n maplot.rcParams['font.weight'] = 500\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS['ICON_FILE'])\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_visible(False)\n subplot.spines['top'].set_visible(False)\n subplot.yaxis.tick_right()\n subplot.tick_params(axis='y', which='major', pad=5)\n subplot.tick_params(axis='x', which='major', pad=5)\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n pyplot.connect('key_press_event', on_clicked)\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n pyplot.show()\n\n\nif __name__ == '__main__':\n __package__ = 'inputprocess'\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS['IS_ANIMATION'])\n",
"step-5": "\"\"\"Plot the output data.\n\"\"\"\n\n# Standard library\nimport os\nimport json\nimport math\nimport matplotlib as maplot\nimport matplotlib.pyplot as pyplot\nfrom datetime import datetime\n\n# User library\nfrom sub.inputprocess import CONSTANTS as CONS\n\n\n# **json.loads(json_data)\ndef get_data():\n \"\"\"Read output file to get data.\"\"\"\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()\n\n\ndef get_objectives(data):\n \"\"\"Get a list of all first chromosomes' objective values.\"\"\"\n objectives = [math.log(population[0][\"objective\"]) for population in data]\n # objectives = [population[0][\"objective\"] for population in data]\n return objectives\n\n\ndef get_new_values(values):\n \"\"\"Record any changes higher. Its size is the same as its argument's.\"\"\"\n new_values = []\n new_value = values[0]\n for value in values:\n if value > new_value:\n new_value = value\n new_values.append(new_value)\n return new_values\n\n\ndef main(values, is_animation=False):\n \"\"\"Main function to show the plot which could be played with animation.\"\"\"\n\n def on_clicked(event):\n \"\"\"Direct the program when a key is pressed.\"\"\"\n\n if event.key == \"x\":\n # Use this os._exit(0) to close whole window, even when playing\n os._exit(0)\n\n if event.key == \"s\":\n # Get time to define image's name\n now = datetime.now()\n current_time = now.strftime(\"%H-%M-%S\")\n plot_name = \"Plot\" + \"-\" + current_time\n\n # Remove left title, then save image\n pyplot.title(\"\", loc=\"left\", pad=20)\n fig.savefig(\n \"%s%s%s\"\n % (\n CONS[\"OUTPUT_PHOTO_DIRECTORY\"],\n plot_name,\n CONS[\"PHOTO_TYPE\"],\n ),\n transparent=False,\n dpi=300,\n )\n\n # Use this exit(0) to prevent exiting when playing the plot\n # but allow closing when plotting finishes\n exit(0)\n\n def draw(values):\n \"\"\"Plot the grid, the line graphs and the titles.\"\"\"\n\n # Turn on grid with dashed style\n subplot.yaxis.grid(True, linestyle=\"dashed\")\n\n # Get list of new higher values\n new_values = get_new_values(values)\n\n # Plot 2 lines\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n\n # Print left plot title\n pyplot.title(\n \"Press X to exit\\nPress S to save\",\n loc=\"left\",\n fontsize=14,\n color=\"#1F76B4\",\n style=\"italic\",\n pad=20,\n )\n\n # Print right plot title\n pyplot.title(\n f\"{'Max objective:':>25}{max(values):>10.2E}\\n\"\n f\"{'Generation:':>25}{values.index(max(values)):>10}\",\n loc=\"right\",\n fontfamily=\"Lucida Sans Typewriter\",\n fontsize=12,\n color=\"#FF7E0E\",\n pad=20,\n )\n\n # The following code configures some elements of the plot window\n\n # Disable toolbar\n maplot.rcParams[\"toolbar\"] = \"None\"\n\n # Set font\n maplot.rcParams[\"font.family\"] = \"Candara\"\n maplot.rcParams[\"font.size\"] = 12\n maplot.rcParams[\"font.weight\"] = 500\n\n # Set window title\n fig = pyplot.figure(figsize=(10, 5))\n fig.canvas.set_window_title(\"Prosthetic Foot Design by Genetic Algorithm\")\n\n # Set icon\n manager = pyplot.get_current_fig_manager()\n manager.window.wm_iconbitmap(CONS[\"ICON_FILE\"])\n\n # Disable some borders\n subplot = fig.add_subplot(111, frameon=True)\n subplot.spines[\"right\"].set_visible(False)\n subplot.spines[\"left\"].set_visible(False)\n subplot.spines[\"top\"].set_visible(False)\n\n # Push verticle axis to the right\n subplot.yaxis.tick_right()\n\n # Padding axis 
label from plot area, maybe unnecessary\n subplot.tick_params(axis=\"y\", which=\"major\", pad=5)\n subplot.tick_params(axis=\"x\", which=\"major\", pad=5)\n\n # Adjust subplot size based on window size\n pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)\n\n # Reconize key pressed\n pyplot.connect(\"key_press_event\", on_clicked)\n\n if is_animation:\n for index in range(1, len(values) + 1):\n subplot.clear()\n draw(values[:index])\n pyplot.pause(0.0001)\n else:\n draw(values)\n\n # Hold window\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n __package__ = \"inputprocess\"\n objectives = get_objectives(get_data())\n main(objectives, is_animation=CONS[\"IS_ANIMATION\"])\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import os
import time
from datetime import datetime
from typing import List, Tuple
from pyspark.sql import SparkSession
from Chapter01.utilities01_py.helper_python import create_session
from Chapter02.utilities02_py.domain_objects import WarcRecord
from Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc
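
# Exploratory script: every stage prints the worker PID that handles a record,
# so the console output shows how Spark pipelines the map -> filter -> map
# chain and which processes share the work.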
def fall_asleep(record: WarcRecord) -> Tuple[str, str]:
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: Tuple[str, str]) -> bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: Tuple[str, str]) -> Tuple[str, str]:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
if __name__ == "__main__":
session: SparkSession = create_session(3, "Wave exploration")
input_warc = "/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc" # ToDo: Change path
raw_records = extract_raw_records(input_warc, session)
warc_records = raw_records \
.flatMap(parse_raw_warc)
process_ids_rdd = warc_records\
.map(fall_asleep)\
.filter(trivial_filter)\
.map(quick_print)
    distinct_process_ids: List[Tuple[str, str]] = process_ids_rdd.distinct().collect()
print(distinct_process_ids)
|
normal
|
{
"blob_id": "fccdf75fe83ad8388c12a63555c4132181fd349a",
"index": 1646,
"step-1": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\n<mask token>\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\nif __name__ == '__main__':\n session: SparkSession = create_session(3, 'Wave exploration')\n input_warc = (\n '/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'\n )\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records.flatMap(parse_raw_warc)\n process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(\n quick_print)\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(\n ).collect()\n print(distinct_process_ids)\n",
"step-4": "import os\nimport time\nfrom datetime import datetime\nfrom typing import List, Tuple\nfrom pyspark.sql import SparkSession\nfrom Chapter01.utilities01_py.helper_python import create_session\nfrom Chapter02.utilities02_py.domain_objects import WarcRecord\nfrom Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\nif __name__ == '__main__':\n session: SparkSession = create_session(3, 'Wave exploration')\n input_warc = (\n '/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'\n )\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records.flatMap(parse_raw_warc)\n process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(\n quick_print)\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(\n ).collect()\n print(distinct_process_ids)\n",
"step-5": "import os\nimport time\nfrom datetime import datetime\nfrom typing import List, Tuple\nfrom pyspark.sql import SparkSession\nfrom Chapter01.utilities01_py.helper_python import create_session\nfrom Chapter02.utilities02_py.domain_objects import WarcRecord\nfrom Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) -> bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) -> (int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\nif __name__ == \"__main__\":\n session: SparkSession = create_session(3, \"Wave exploration\")\n\n input_warc = \"/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc\" # ToDo: Change path\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records \\\n .flatMap(parse_raw_warc)\n\n process_ids_rdd = warc_records\\\n .map(fall_asleep)\\\n .filter(trivial_filter)\\\n .map(quick_print)\n\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct().collect()\n print(distinct_process_ids)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Copyright (c) 2020 Hai Nguyen
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import tensorflow.keras.backend as K
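
# Segmentation metrics computed on flattened binary masks. Each ratio carries a
# smoothing term so it stays defined when a batch contains an empty ground-truth mask.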
def dice_coef(y_true, y_pred):
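    # Dice = (2*|X ∩ Y| + smooth) / (|X| + |Y| + smooth); smoothing avoids 0/0 on empty masks.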
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
union = K.sum(y_true_f) + K.sum(y_pred_f)
return (2. * intersection + smooth) / (union + smooth)
def true_pos(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pos = K.round(K.clip(y_true, 0, 1))
tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
return tp
def true_neg(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tn = K.sum(y_neg * y_pred_neg)
tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)
return tn_ratio
def false_pos(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
fp = K.sum(y_neg * y_pred_pos)
fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)
return fp_ratio
|
normal
|
{
"blob_id": "18b10a68b2707b7bfeccbd31c5d15686453b3406",
"index": 6253,
"step-1": "<mask token>\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-2": "<mask token>\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\n<mask token>\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-3": "<mask token>\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-4": "import tensorflow.keras.backend as K\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-5": "# Copyright (c) 2020 Hai Nguyen\n# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nimport tensorflow.keras.backend as K\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2. * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth) \n return tp \n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Code Rodrigo
'''
This script basically generates all the possible combinations
to be analyzed according to the Dempster-Shafer theory.
It requires defining beforehand, via the sensitivity analysis,
the combination of variables that leads to the upper and lower
bound for a given combination of random sets.
'''
import itertools as itt
import numpy as np
import matplotlib.pyplot as plt
def read_input_RS ():
low=np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)
lower_bound = np.ravel(low)
upper_bound = (np.ravel(np.transpose(np.loadtxt('UpperArray.csv',
delimiter=',', skiprows=1))))
return lower_bound, upper_bound, low[0,:].size
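
# Note: the lower bound uses itertools.combinations (order-insensitive, no
# repetition) while the upper bound uses itertools.product (full Cartesian
# expansion); whether that asymmetry is intended depends on the sensitivity
# analysis described in the module docstring.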
def generate_combinations (lower, upper, n):
lower_input = itt.combinations(lower, n)
upper_input = np.array(list(itt.product(upper, repeat=n)))
return lower_input, upper_input,
def independent_probability ():
probability_assignment = (np.loadtxt('ProbabilityAssignment.csv',
delimiter=',', skiprows=1))
return probability_assignment
if __name__ == "__main__":
a,b,r=read_input_RS ()
#c=a[0,:].size
d,e=generate_combinations (a,b,r)
print(b)
print(e)
np.savetxt('test.out', e, delimiter=',')
#b=read_input_RS ()
#c=generate_combinations (a,b)
|
normal
|
{
"blob_id": "4b44f4343da1677b5436ec2b153e573fda3c0cee",
"index": 2280,
"step-1": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\n<mask token>\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\nif __name__ == '__main__':\n a, b, r = read_input_RS()\n d, e = generate_combinations(a, b, r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n",
"step-4": "<mask token>\nimport itertools as itt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\nif __name__ == '__main__':\n a, b, r = read_input_RS()\n d, e = generate_combinations(a, b, r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n",
"step-5": "# Code Rodrigo\n\n'''\nThis script, basically generates all he possible combinations\nto be analyzed according to the Dempster Shafer Theory.\nIt requires to define beforehand, the combination of variables\nthat lead to the higher and lower bound for a given combination\nof random sets, via the sensitivity analysis\n'''\n\nimport itertools as itt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_input_RS ():\n low=np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = (np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1))))\n return lower_bound, upper_bound, low[0,:].size\n\ndef generate_combinations (lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input,\n\ndef independent_probability ():\n probability_assignment = (np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1))\n return probability_assignment\n\nif __name__ == \"__main__\":\n a,b,r=read_input_RS ()\n #c=a[0,:].size\n d,e=generate_combinations (a,b,r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n\n#b=read_input_RS ()\n#c=generate_combinations (a,b)\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from collections import deque
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
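
# Queue-based generation of binary numbers: seed the queue with "1"; each value
# taken from the front is printed and re-enqueued as value+"0" and value+"1",
# which yields the binary forms of 1..n in order (1, 10, 11, 100, ...).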
def binary_numbers(n):
queue = Queue()
queue.enqueue("1")
for i in range(n):
front = queue.front()
print(" ", front)
queue.enqueue(front + "0")
queue.enqueue(front + "1")
queue.dequeue()
if __name__ == '__main__':
binary_numbers(20)
|
normal
|
{
"blob_id": "2898506b9fd5b112f93a1ff6b010848244c398bd",
"index": 7197,
"step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n <mask token>\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)\n",
"step-5": "from collections import deque\n\nclass Queue:\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue(\"1\")\n\n for i in range(n):\n front = queue.front()\n print(\" \", front)\n queue.enqueue(front + \"0\")\n queue.enqueue(front + \"1\")\n\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from gamesim import GameSim
from network import Network
from player import RemotePlayer
from mutator import Mutator
from random import *
import copy
game = GameSim()
game.make_players(10)
base = "networks/"
dir = ""
name = "203964_85377"
gens = 2000
game.players[0].import_player(base + dir + name + ".network")
game.train(gens)
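
# Assumption inferred from the guard below: train() already exports a
# checkpoint every 500 generations, so save manually only when the final
# generation misses that cycle.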
if (gens%500 != 0):
game.players[0].export_player()
|
normal
|
{
"blob_id": "1aace7b9385aefdc503ce0e43e0f7f0996fe112a",
"index": 4284,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngame.make_players(10)\n<mask token>\ngame.players[0].import_player(base + dir + name + '.network')\ngame.train(gens)\nif gens % 500 != 0:\n game.players[0].export_player()\n",
"step-3": "<mask token>\ngame = GameSim()\ngame.make_players(10)\nbase = 'networks/'\ndir = ''\nname = '203964_85377'\ngens = 2000\ngame.players[0].import_player(base + dir + name + '.network')\ngame.train(gens)\nif gens % 500 != 0:\n game.players[0].export_player()\n",
"step-4": "from gamesim import GameSim\nfrom network import Network\nfrom player import RemotePlayer\nfrom mutator import Mutator\nfrom random import *\nimport copy\ngame = GameSim()\ngame.make_players(10)\nbase = 'networks/'\ndir = ''\nname = '203964_85377'\ngens = 2000\ngame.players[0].import_player(base + dir + name + '.network')\ngame.train(gens)\nif gens % 500 != 0:\n game.players[0].export_player()\n",
"step-5": "from gamesim import GameSim\nfrom network import Network\nfrom player import RemotePlayer\nfrom mutator import Mutator\nfrom random import *\nimport copy\n\ngame = GameSim()\ngame.make_players(10)\n\nbase = \"networks/\"\ndir = \"\"\nname = \"203964_85377\"\n\ngens = 2000\n\ngame.players[0].import_player(base + dir + name + \".network\")\ngame.train(gens)\n\nif (gens%500 != 0):\n\tgame.players[0].export_player()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from tkinter.ttk import *
from tkinter import *
import tkinter.ttk as ttk
from tkinter import messagebox
import sqlite3
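
# Minimal Tkinter registration form backed by SQLite: the Entry widgets drive
# create/insert/delete queries against a local userinfo.db file.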
root = Tk()
root.title('Register-Form')
root.geometry("600x450+-2+86")
root.minsize(120, 1)
def delete():
if(Entry1.get()==''):
        messagebox.showerror('Register-Form', 'ID is compulsory for delete')
else:
ms = messagebox.askokcancel('Delete Result', 'Would you like to delete this account?')
if (ms):
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("delete from student where id='"+ Entry1.get() +"'")
c.execute('commit')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
messagebox.showwarning('Delete Status', 'Deleted Succesfully')
conn.close()
def sign_in():
root.destroy()
import main
def insert_info():
idp=Entry1.get()
un=Entry2.get()
password=Entry3.get()
if (idp=='' and password=='' and un==''):
        messagebox.showerror('Submit Status', 'All fields are required')
elif Entry3.get() != Entry4.get():
messagebox.showerror('register error', 'please confirm password')
Entry4.delete(0, END)
Entry4.focus()
else:
try:
id1=Entry1.get();
uname=Entry2.get();
password1=Entry3.get();
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)")
c.execute("INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)", (id1, uname, password1))
conn.commit()
conn.close()
messagebox.showinfo('Register Form', 'Account Created Successfully!')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
except sqlite3.IntegrityError:
messagebox.showerror('Register Form', f'Please use another id instead of {Entry1.get()} because that id exists')
Entry1.focus()
Label1 = ttk.Label(root)
Label1.place(relx=0.35, rely=0.156, height=21, width=44)
Label1.configure(text='''Enter ID:''')
Label2 = ttk.Label(root)
Label2.place(relx=0.35, rely=0.2, height=31, width=54)
Label2.configure(text='''UName:''')
Label3 = ttk.Label(root)
Label3.place(relx=0.333, rely=0.289, height=21, width=64)
Label3.configure(text='''Password:''')
Label4 = ttk.Label(root)
Label4.place(relx=0.267, rely=0.356, height=21, width=104)
Label4.configure(text='''Confirm Password:''')
Entry1 = ttk.Entry(root)
Entry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)
Entry2 = ttk.Entry(root)
Entry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)
Entry3 = ttk.Entry(root, show='*')
Entry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)
Entry4 = ttk.Entry(root, show='*')
Entry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)
b0 = ttk.Button(root, command=sign_in)
b0.place(relx=0.467, rely=0.578, height=84, width=87)
b0.configure(text='Sign in')
b1 = ttk.Button(root, text='Submit', command=insert_info)
b1.place(relx=0.767, rely=0.578, height=84, width=87)
B3 = ttk.Button(root, command=delete)
B3.place(relx=0.617, rely=0.578, height=84, width=87)
B3.configure(text='''Delete''')
root.mainloop()
|
normal
|
{
"blob_id": "37cafe5d3d3342e5e4070b87caf0cfb5bcfdfd8d",
"index": 1613,
"step-1": "<mask token>\n\n\ndef sign_in():\n root.destroy()\n import main\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.title('Register-Form')\nroot.geometry('600x450+-2+86')\nroot.minsize(120, 1)\n\n\ndef delete():\n if Entry1.get() == '':\n messagebox.showerror('Register-Form', 'ID Is compolsary for delete')\n else:\n ms = messagebox.askokcancel('Delete Result',\n 'Would you like to delete this account?')\n if ms:\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\"delete from student where id='\" + Entry1.get() + \"'\")\n c.execute('commit')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n messagebox.showwarning('Delete Status', 'Deleted Succesfully')\n conn.close()\n\n\ndef sign_in():\n root.destroy()\n import main\n\n\ndef insert_info():\n idp = Entry1.get()\n un = Entry2.get()\n password = Entry3.get()\n if idp == '' and password == '' and un == '':\n messagebox.showerror('Submit Status', 'All fields are requierd')\n elif Entry3.get() != Entry4.get():\n messagebox.showerror('register error', 'please confirm password')\n Entry4.delete(0, END)\n Entry4.focus()\n else:\n try:\n id1 = Entry1.get()\n uname = Entry2.get()\n password1 = Entry3.get()\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\n 'CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)'\n )\n c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',\n (id1, uname, password1))\n conn.commit()\n conn.close()\n messagebox.showinfo('Register Form',\n 'Account Created Successfully!')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n except sqlite3.IntegrityError:\n messagebox.showerror('Register Form',\n f'Please use another id instead of {Entry1.get()} because that id exists'\n )\n Entry1.focus()\n\n\n<mask token>\nLabel1.place(relx=0.35, rely=0.156, height=21, width=44)\nLabel1.configure(text='Enter ID:')\n<mask token>\nLabel2.place(relx=0.35, rely=0.2, height=31, width=54)\nLabel2.configure(text='UName:')\n<mask token>\nLabel3.place(relx=0.333, rely=0.289, height=21, width=64)\nLabel3.configure(text='Password:')\n<mask token>\nLabel4.place(relx=0.267, rely=0.356, height=21, width=104)\nLabel4.configure(text='Confirm Password:')\n<mask token>\nEntry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)\n<mask token>\nEntry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)\n<mask token>\nEntry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)\n<mask token>\nEntry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)\n<mask token>\nb0.place(relx=0.467, rely=0.578, height=84, width=87)\nb0.configure(text='Sign in')\n<mask token>\nb1.place(relx=0.767, rely=0.578, height=84, width=87)\n<mask token>\nB3.place(relx=0.617, rely=0.578, height=84, width=87)\nB3.configure(text='Delete')\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('Register-Form')\nroot.geometry('600x450+-2+86')\nroot.minsize(120, 1)\n\n\ndef delete():\n if Entry1.get() == '':\n messagebox.showerror('Register-Form', 'ID Is compolsary for delete')\n else:\n ms = messagebox.askokcancel('Delete Result',\n 'Would you like to delete this account?')\n if ms:\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\"delete from student where id='\" + Entry1.get() + \"'\")\n c.execute('commit')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n messagebox.showwarning('Delete Status', 'Deleted Succesfully')\n conn.close()\n\n\ndef sign_in():\n root.destroy()\n import main\n\n\ndef insert_info():\n idp = Entry1.get()\n un = Entry2.get()\n password = Entry3.get()\n if idp == '' and password == '' and un == '':\n messagebox.showerror('Submit Status', 'All fields are requierd')\n elif Entry3.get() != Entry4.get():\n messagebox.showerror('register error', 'please confirm password')\n Entry4.delete(0, END)\n Entry4.focus()\n else:\n try:\n id1 = Entry1.get()\n uname = Entry2.get()\n password1 = Entry3.get()\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\n 'CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)'\n )\n c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',\n (id1, uname, password1))\n conn.commit()\n conn.close()\n messagebox.showinfo('Register Form',\n 'Account Created Successfully!')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n except sqlite3.IntegrityError:\n messagebox.showerror('Register Form',\n f'Please use another id instead of {Entry1.get()} because that id exists'\n )\n Entry1.focus()\n\n\nLabel1 = ttk.Label(root)\nLabel1.place(relx=0.35, rely=0.156, height=21, width=44)\nLabel1.configure(text='Enter ID:')\nLabel2 = ttk.Label(root)\nLabel2.place(relx=0.35, rely=0.2, height=31, width=54)\nLabel2.configure(text='UName:')\nLabel3 = ttk.Label(root)\nLabel3.place(relx=0.333, rely=0.289, height=21, width=64)\nLabel3.configure(text='Password:')\nLabel4 = ttk.Label(root)\nLabel4.place(relx=0.267, rely=0.356, height=21, width=104)\nLabel4.configure(text='Confirm Password:')\nEntry1 = ttk.Entry(root)\nEntry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)\nEntry2 = ttk.Entry(root)\nEntry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)\nEntry3 = ttk.Entry(root, show='*')\nEntry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)\nEntry4 = ttk.Entry(root, show='*')\nEntry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)\nb0 = ttk.Button(root, command=sign_in)\nb0.place(relx=0.467, rely=0.578, height=84, width=87)\nb0.configure(text='Sign in')\nb1 = ttk.Button(root, text='Submit', command=insert_info)\nb1.place(relx=0.767, rely=0.578, height=84, width=87)\nB3 = ttk.Button(root, command=delete)\nB3.place(relx=0.617, rely=0.578, height=84, width=87)\nB3.configure(text='Delete')\nroot.mainloop()\n",
"step-4": "from tkinter.ttk import *\nfrom tkinter import *\nimport tkinter.ttk as ttk\nfrom tkinter import messagebox\nimport sqlite3\nroot = Tk()\nroot.title('Register-Form')\nroot.geometry('600x450+-2+86')\nroot.minsize(120, 1)\n\n\ndef delete():\n if Entry1.get() == '':\n messagebox.showerror('Register-Form', 'ID Is compolsary for delete')\n else:\n ms = messagebox.askokcancel('Delete Result',\n 'Would you like to delete this account?')\n if ms:\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\"delete from student where id='\" + Entry1.get() + \"'\")\n c.execute('commit')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n messagebox.showwarning('Delete Status', 'Deleted Succesfully')\n conn.close()\n\n\ndef sign_in():\n root.destroy()\n import main\n\n\ndef insert_info():\n idp = Entry1.get()\n un = Entry2.get()\n password = Entry3.get()\n if idp == '' and password == '' and un == '':\n messagebox.showerror('Submit Status', 'All fields are requierd')\n elif Entry3.get() != Entry4.get():\n messagebox.showerror('register error', 'please confirm password')\n Entry4.delete(0, END)\n Entry4.focus()\n else:\n try:\n id1 = Entry1.get()\n uname = Entry2.get()\n password1 = Entry3.get()\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\n 'CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)'\n )\n c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',\n (id1, uname, password1))\n conn.commit()\n conn.close()\n messagebox.showinfo('Register Form',\n 'Account Created Successfully!')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n except sqlite3.IntegrityError:\n messagebox.showerror('Register Form',\n f'Please use another id instead of {Entry1.get()} because that id exists'\n )\n Entry1.focus()\n\n\nLabel1 = ttk.Label(root)\nLabel1.place(relx=0.35, rely=0.156, height=21, width=44)\nLabel1.configure(text='Enter ID:')\nLabel2 = ttk.Label(root)\nLabel2.place(relx=0.35, rely=0.2, height=31, width=54)\nLabel2.configure(text='UName:')\nLabel3 = ttk.Label(root)\nLabel3.place(relx=0.333, rely=0.289, height=21, width=64)\nLabel3.configure(text='Password:')\nLabel4 = ttk.Label(root)\nLabel4.place(relx=0.267, rely=0.356, height=21, width=104)\nLabel4.configure(text='Confirm Password:')\nEntry1 = ttk.Entry(root)\nEntry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)\nEntry2 = ttk.Entry(root)\nEntry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)\nEntry3 = ttk.Entry(root, show='*')\nEntry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)\nEntry4 = ttk.Entry(root, show='*')\nEntry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)\nb0 = ttk.Button(root, command=sign_in)\nb0.place(relx=0.467, rely=0.578, height=84, width=87)\nb0.configure(text='Sign in')\nb1 = ttk.Button(root, text='Submit', command=insert_info)\nb1.place(relx=0.767, rely=0.578, height=84, width=87)\nB3 = ttk.Button(root, command=delete)\nB3.place(relx=0.617, rely=0.578, height=84, width=87)\nB3.configure(text='Delete')\nroot.mainloop()\n",
"step-5": "from tkinter.ttk import *\nfrom tkinter import *\nimport tkinter.ttk as ttk\nfrom tkinter import messagebox\nimport sqlite3\n\nroot = Tk()\nroot.title('Register-Form')\nroot.geometry(\"600x450+-2+86\")\nroot.minsize(120, 1)\n\ndef delete():\n if(Entry1.get()==''):\n messagebox.showerror('Register-Form', 'ID Is compolsary for delete')\n else:\n ms = messagebox.askokcancel('Delete Result', 'Would you like to delete this account?')\n if (ms):\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\"delete from student where id='\"+ Entry1.get() +\"'\")\n c.execute('commit')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n messagebox.showwarning('Delete Status', 'Deleted Succesfully')\n conn.close()\n\ndef sign_in():\n root.destroy()\n import main\n\ndef insert_info():\n idp=Entry1.get()\n un=Entry2.get()\n password=Entry3.get()\n if (idp=='' and password=='' and un==''):\n messagebox.showerror('Submit Status', 'All fields are requierd')\n elif Entry3.get() != Entry4.get():\n messagebox.showerror('register error', 'please confirm password')\n Entry4.delete(0, END) \n Entry4.focus()\n else:\n try:\n id1=Entry1.get();\n uname=Entry2.get();\n password1=Entry3.get();\n\n conn = sqlite3.connect('userinfo.db')\n with conn:\n c = conn.cursor()\n c.execute(\"CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)\")\n c.execute(\"INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)\", (id1, uname, password1))\n conn.commit()\n conn.close()\n messagebox.showinfo('Register Form', 'Account Created Successfully!')\n Entry1.delete(0, END)\n Entry2.delete(0, END)\n Entry3.delete(0, END)\n Entry4.delete(0, END)\n except sqlite3.IntegrityError:\n messagebox.showerror('Register Form', f'Please use another id instead of {Entry1.get()} because that id exists')\n Entry1.focus()\n\nLabel1 = ttk.Label(root)\nLabel1.place(relx=0.35, rely=0.156, height=21, width=44)\nLabel1.configure(text='''Enter ID:''')\n\nLabel2 = ttk.Label(root)\nLabel2.place(relx=0.35, rely=0.2, height=31, width=54)\nLabel2.configure(text='''UName:''')\n\nLabel3 = ttk.Label(root)\nLabel3.place(relx=0.333, rely=0.289, height=21, width=64)\nLabel3.configure(text='''Password:''')\n\nLabel4 = ttk.Label(root)\nLabel4.place(relx=0.267, rely=0.356, height=21, width=104)\nLabel4.configure(text='''Confirm Password:''')\n\nEntry1 = ttk.Entry(root)\nEntry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)\n\nEntry2 = ttk.Entry(root)\nEntry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)\n\nEntry3 = ttk.Entry(root, show='*')\nEntry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)\n\n\nEntry4 = ttk.Entry(root, show='*')\nEntry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)\n\nb0 = ttk.Button(root, command=sign_in)\nb0.place(relx=0.467, rely=0.578, height=84, width=87)\nb0.configure(text='Sign in')\n\n\nb1 = ttk.Button(root, text='Submit', command=insert_info)\nb1.place(relx=0.767, rely=0.578, height=84, width=87)\n\n\nB3 = ttk.Button(root, command=delete)\nB3.place(relx=0.617, rely=0.578, height=84, width=87)\nB3.configure(text='''Delete''')\n\n\n\nroot.mainloop()\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
# Made by Kelvin Schneider
#12
numero = input("Digite um numero de telefone: ")
numero = numero.replace("-","")
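# Left-pad short numbers with '3' until 8 digits, then re-insert the hyphen (XXXX-XXXX).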
if (len(numero) < 8):
while len(numero) < 8:
numero = "3" + numero
numero = numero[:4] + "-" + numero[4:]
print("Numero: ", numero)
elif (len(numero) > 8):
print("Numero invalido")
|
normal
|
{
"blob_id": "6297256bce1954f041915a1ce0aa0546689850f3",
"index": 2256,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(numero) < 8:\n while len(numero) < 8:\n numero = '3' + numero\n numero = numero[:4] + '-' + numero[4:]\n print('Numero: ', numero)\nelif len(numero) > 8:\n print('Numero invalido')\n",
"step-3": "numero = input('Digite um numero de telefone: ')\nnumero = numero.replace('-', '')\nif len(numero) < 8:\n while len(numero) < 8:\n numero = '3' + numero\n numero = numero[:4] + '-' + numero[4:]\n print('Numero: ', numero)\nelif len(numero) > 8:\n print('Numero invalido')\n",
"step-4": "# Feito por Kelvin Schneider\n#12\n\nnumero = input(\"Digite um numero de telefone: \")\nnumero = numero.replace(\"-\",\"\")\n\nif (len(numero) < 8):\n while len(numero) < 8:\n numero = \"3\" + numero\n\n numero = numero[:4] + \"-\" + numero[4:]\n print(\"Numero: \", numero)\n\nelif (len(numero) > 8):\n print(\"Numero invalido\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from googleAPI.drive import *
class GoogleSheet(GoogleDrive):
"""
The base class of Google Sheet API.
    It handles extracting data from and appending data to Google Sheets.
    It is not tied to a specific spreadsheet.
    This class is powered by pandas, so make sure the data in the
    spreadsheet can be processed by pandas.
Terminology:
        Spreadsheet: The whole file. Same level as a Microsoft Excel file.
Sheet: A tab inside the spreadsheet. Same as Excel sheet.
A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of
cells in the spreadsheet, and is typically used in formulas.
https://developers.google.com/sheets/api/guides/concepts#a1_notation
"""
def __init__(
self,
creds=None,
credential_path="",
credential_scopes=["https://www.googleapis.com/auth/drive"],
token_prefix="GoogleDrive_",
token_suffix="",
):
"""
Initialize the credential.
If credential `creds` is provided, this method will use it directly
if it is valid.
Otherwise, it will use `credential_path` and `credential_scopes` to
get the token.
Args:
creds: None or google.oauth2.credentials.Credentials, default None
credential_path: String, default ''
Path to the credential with either 'token.pickle' or
'credentials.json' in it.
credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']
                Scope of the credential. The default scope can
                'See, edit, create, and delete all of your Google Drive files'.
Details:
https://developers.google.com/identity/protocols/oauth2/scopes#sheets
token_prefix: String, default 'GoogleDrive_'
Prefix of token file. eg. '{token_prefix}token.pickle'.
token_suffix: String, default ''
Suffix of token file. eg. 'token{token_suffix}.pickle'.
"""
if creds is not None and self.credential_validation(creds):
self.creds = creds
else:
self.creds = self.credential(
credential_path, credential_scopes, token_prefix, token_suffix
)
def create_spreadsheet(self, spreadsheet_name: str):
"""
Creates a spreadsheet, returning the newly created spreadsheet's ID.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create
Args:
spreadsheet_name: String
The name of the spreadsheet.
Return:
spreadsheet ID: String
"""
service = build("sheets", "v4", credentials=self.creds)
spreadsheet = {"properties": {"title": spreadsheet_name}}
spreadsheet = (
service.spreadsheets()
.create(body=spreadsheet, fields="spreadsheetId")
.execute()
)
return spreadsheet.get("spreadsheetId")
def search_spreadsheet(self, spreadsheet_name: str):
"""
        Search for the spreadsheet in Google Drive and return the spreadsheet ID.
        Since this uses the Google Drive API, the scope must include reading
        files in Google Drive.
If you want customized query, use `GoogleDrive.search_file()` instead.
Args:
spreadsheet_name: String
The name of the spreadsheet. There is no file extension.
Return:
Dictionary.
Key: Spreadsheet name.
Value: List of spreadsheet ID in case there are duplicate file names.
"""
result = self.search_file(file_name=spreadsheet_name)
return result
def get_spreadsheet_property(self, spreadsheet_id: str):
"""
Get spreadsheet property and sheet property.
Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.
Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get
Args:
spreadsheet_id: String
Spreadsheet ID.
Return:
Tuple: (spreadsheet_property, sheet_property)
spreadsheet_property: Dictionary
The entire spreadsheet property. It is the superset of the sheet property.
Structure of the response:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
sheet_property: Dictionary
sheetId: Dictionary, key: sheet name, value: sheet ID
The unique ID of each sheet regardless of position.
sheetIndex: Dictionary, key: sheet name, value: sheet index
The position of the sheet starting from 0.
            sheetRowCount: Dictionary, key: sheet name, value: sheet row count
                The number of rows in the sheet. Note that this is not the number
                of rows that contain data. It is the boundary of the sheet.
            sheetColCount: Dictionary, key: sheet name, value: sheet column count
                The number of columns in the sheet. Note that this is not the
                number of columns that contain data. It is the boundary of the sheet.
"""
service = build("sheets", "v4", credentials=self.creds)
request = service.spreadsheets().get(
spreadsheetId=spreadsheet_id, includeGridData=False
)
# Spreadsheet property
spreadsheet_property = request.execute()
# Sheet property
sheetId = {
d["properties"]["title"]: d["properties"]["sheetId"]
for d in spreadsheet_property["sheets"]
}
sheetIndex = {
d["properties"]["title"]: d["properties"]["index"]
for d in spreadsheet_property["sheets"]
}
sheetRowCount = {
d["properties"]["title"]: d["properties"]["gridProperties"]["rowCount"]
for d in spreadsheet_property["sheets"]
}
sheetColCount = {
d["properties"]["title"]: d["properties"]["gridProperties"]["columnCount"]
for d in spreadsheet_property["sheets"]
}
sheet_property = {
"sheetId": sheetId,
"sheetIndex": sheetIndex,
"sheetRowCount": sheetRowCount,
"sheetColCount": sheetColCount,
}
return spreadsheet_property, sheet_property
def download_spreadsheet(self, spreadsheet_id: str, save_as=""):
"""
        Download the spreadsheet given its spreadsheet ID
and return a file pointer or save it as a file.
Supported file formats: .xlsx, .csv, .pdf.
For unsupported file formats i.e. Open Office sheet,
sheet only, and HTML, use `GoogleDrive.download_file()`.
Official API guide:
https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive
Args:
spreadsheet_id: String
The spreadsheet ID.
save_as: String, default ''
'': Return a file pointer.
'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.
'CSV': Save as '{Spreadsheet name}.csv'. Return None.
First sheet only.
'PDF': Save as '{Spreadsheet name}.pdf'. Return None.
'*.xlsx': Save as '*.xlsx'. Return None.
'*.csv': Save as '*.csv'. Return None.
'*.pdf': Save as '*.pdf'. Return None.
Return:
None or file pointer depending on the `save_as`
"""
spreadsheet_name = self.get_file_metadata(
file_id=spreadsheet_id, fields="name"
)["name"]
mimeType = {
"Excel": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"Open Office sheet": "application/x-vnd.oasis.opendocument.spreadsheet",
"PDF": "application/pdf",
"CSV": "text/csv",
}
if save_as == "":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["Excel"]
)
elif save_as == "Excel":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["Excel"],
save_as="{0}.xlsx".format(spreadsheet_name),
)
elif save_as == "CSV":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["CSV"],
save_as="{0}.csv".format(spreadsheet_name),
)
elif save_as == "PDF":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["PDF"],
save_as="{0}.pdf".format(spreadsheet_name),
)
elif save_as[-5:] == ".xlsx":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["Excel"], save_as=save_as
)
elif save_as[-4:] == ".csv":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["CSV"], save_as=save_as
)
elif save_as[-4:] == ".pdf":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["PDF"], save_as=save_as
)
else:
raise Exception(
textwrap.dedent(
"""\
{0} is not a supported file format.
Please check the `GoogleSheet.download_spreadsheet()` docstring.
Or you may want to use `GoogleDrive.download_file()` method.\
""".format(
save_as
)
)
)
return result
def get_values(
self,
spreadsheet_id: str,
range_,
value_render_option=None,
date_time_render_option=None,
):
"""
        Get a single value, a range of values, or several ranges of values.
Use `GoogleSheet.download_spreadsheet()` if you want to get the
entire spreadsheet.
Official API guide:
For single range:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
For multiple ranges:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
Example:
Get the entire sheet of `Sheet 1`.
>>> gs.get_values(spreadsheet_id, "'Sheet 1'")
Get the value of cell `A5` in `Sheet 1`.
>>> gs.get_values(spreadsheet_id, "'Sheet 1'!A5")
Args:
spreadsheet_id: String
range_: String or List of strings in A1 notation
String: A single sheet, A single range
List of strings: Several ranges
value_render_option: String, default None
How values should be represented in the output.
The default render option is `ValueRenderOption.FORMATTED_VALUE`.
Details:
https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
date_time_render_option: String, default None
How dates, times, and durations should be represented in the output.
Details:
https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Return:
ValueRange in Dictionary
Details:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange
"""
service = build("sheets", "v4", credentials=self.creds)
        # How values should be represented in the output.
        # The default render option is ValueRenderOption.FORMATTED_VALUE.
        # How dates, times, and durations should be represented in the output.
        # This is ignored if value_render_option is FORMATTED_VALUE.
        # The default is DateTimeRenderOption.SERIAL_NUMBER.
request = (
service.spreadsheets()
.values()
.batchGet(
spreadsheetId=spreadsheet_id,
ranges=range_,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
)
result = request.execute()
return result
def clear_values(self, spreadsheet_id: str, range_):
"""
Clear values from a spreadsheet.
        Only values are cleared -- all other properties of
        the cell (such as formatting, data validation, etc.) are kept.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear
Args:
spreadsheet_id: String
range_: String, A1 notation
Return:
Dictionary, cleared range
{
"spreadsheetId": string,
"clearedRange": string
}
"""
service = build("sheets", "v4", credentials=self.creds)
        batch_clear_values_request_body = {
            # The ranges to clear, in A1 notation.
            "ranges": range_
        }
request = (
service.spreadsheets()
.values()
.batchClear(
spreadsheetId=spreadsheet_id, body=batch_clear_values_request_body
)
)
result = request.execute()
return result
def update_values(self, spreadsheet_id: str, data, value_input_option="RAW"):
"""
Sets values in a range of a spreadsheet.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
Args:
spreadsheet_id: String
data: ValueRange in Dictionary
{
"range": string,
"majorDimension": enum (Dimension),
"values": [
array
]
}
Details:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange
Return:
Dictionary in structure:
{
"spreadsheetId": string,
"totalUpdatedRows": integer,
"totalUpdatedColumns": integer,
"totalUpdatedCells": integer,
"totalUpdatedSheets": integer,
"responses": [
{
object (UpdateValuesResponse)
}
]
}
"""
service = build("sheets", "v4", credentials=self.creds)
batch_update_values_request_body = {
# How the input data should be interpreted.
"value_input_option": value_input_option, # 'USER_ENTERED'
# The new values to apply to the spreadsheet.
"data": data,
}
request = (
service.spreadsheets()
.values()
.batchUpdate(
spreadsheetId=spreadsheet_id, body=batch_update_values_request_body
)
)
result = request.execute()
return result
def update_column_format(self):
"""
Update the column format.
Supported format: date, number, currency
        Official API guide:
https://developers.google.com/sheets/api/samples/formatting
https://developers.google.com/sheets/api/guides/formats
https://developers.google.com/sheets/api/guides/batchupdate
"""
pass
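
# A minimal usage sketch of the class above -- an illustration, not part of
# the original module. It assumes a folder './credentials' holding a valid
# 'credentials.json' or 'token.pickle'; the spreadsheet name, sheet name, and
# cell values are placeholders.
if __name__ == "__main__":
    gs = GoogleSheet(credential_path="./credentials")
    sheet_id = gs.create_spreadsheet("demo-spreadsheet")
    # Write a 2x2 block starting at A1 of the default first sheet.
    gs.update_values(
        sheet_id,
        data=[{"range": "'Sheet1'!A1:B2", "values": [["a", "b"], ["c", "d"]]}],
        value_input_option="USER_ENTERED",
    )
    # Read the same range back as a ValueRange dictionary.
    print(gs.get_values(sheet_id, "'Sheet1'!A1:B2"))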
|
normal
|
{
"blob_id": "9e793bd0faef65dfe8ac4b722e50d2055837449f",
"index": 4701,
"step-1": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n <mask token>\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n <mask token>\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n <mask token>\n <mask token>\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) 
are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. 
It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n <mask token>\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n <mask token>\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. 
It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n\n def get_values(self, spreadsheet_id: str, range_, value_render_option=\n None, date_time_render_option=None):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n value_render_option = value_render_option\n date_time_render_option = date_time_render_option\n request = service.spreadsheets().values().batchGet(spreadsheetId=\n spreadsheet_id, ranges=range_, valueRenderOption=\n value_render_option, dateTimeRenderOption=date_time_render_option)\n result = request.execute()\n return result\n\n def clear_values(self, 
spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option='RAW'\n ):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_update_values_request_body = {'value_input_option':\n value_input_option, 'data': data}\n request = service.spreadsheets().values().batchUpdate(spreadsheetId\n =spreadsheet_id, body=batch_update_values_request_body)\n result = request.execute()\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-4": "from googleAPI.drive import *\n\n\nclass GoogleSheet(GoogleDrive):\n \"\"\"\n The base class of Google Sheet API.\n \n It aims at dealing with the Google Sheet data extract and append.\n It is not tied to a specific spreadsheet.\n \n This class is powered by pandas. Thus, make sure the data in the\n spreadsheet is able to be processed by pandas.\n \n Terminology:\n Spreadsheet: The whole file. Same level as an Microsoft Excel file.\n Sheet: A tab inside the spreadsheet. Same as Excel sheet.\n A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of \n cells in the spreadsheet, and is typically used in formulas.\n https://developers.google.com/sheets/api/guides/concepts#a1_notation\n \"\"\"\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. 
There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n\n def get_values(self, spreadsheet_id: str, range_, value_render_option=\n None, date_time_render_option=None):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n value_render_option = value_render_option\n date_time_render_option = date_time_render_option\n request = service.spreadsheets().values().batchGet(spreadsheetId=\n spreadsheet_id, ranges=range_, valueRenderOption=\n value_render_option, dateTimeRenderOption=date_time_render_option)\n result = request.execute()\n return result\n\n def clear_values(self, 
spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option='RAW'\n ):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_update_values_request_body = {'value_input_option':\n value_input_option, 'data': data}\n request = service.spreadsheets().values().batchUpdate(spreadsheetId\n =spreadsheet_id, body=batch_update_values_request_body)\n result = request.execute()\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-5": "from googleAPI.drive import *\n\n\nclass GoogleSheet(GoogleDrive):\n \"\"\"\n The base class of Google Sheet API.\n \n It aims at dealing with the Google Sheet data extract and append.\n It is not tied to a specific spreadsheet.\n \n This class is powered by pandas. Thus, make sure the data in the\n spreadsheet is able to be processed by pandas.\n \n Terminology:\n Spreadsheet: The whole file. Same level as an Microsoft Excel file.\n Sheet: A tab inside the spreadsheet. Same as Excel sheet.\n A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of \n cells in the spreadsheet, and is typically used in formulas.\n https://developers.google.com/sheets/api/guides/concepts#a1_notation\n \"\"\"\n\n def __init__(\n self,\n creds=None,\n credential_path=\"\",\n credential_scopes=[\"https://www.googleapis.com/auth/drive\"],\n token_prefix=\"GoogleDrive_\",\n token_suffix=\"\",\n ):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(\n credential_path, credential_scopes, token_prefix, token_suffix\n )\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n spreadsheet = {\"properties\": {\"title\": spreadsheet_name}}\n spreadsheet = (\n service.spreadsheets()\n .create(body=spreadsheet, fields=\"spreadsheetId\")\n .execute()\n )\n return spreadsheet.get(\"spreadsheetId\")\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. 
There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n request = service.spreadsheets().get(\n spreadsheetId=spreadsheet_id, includeGridData=False\n )\n # Spreadsheet property\n spreadsheet_property = request.execute()\n\n # Sheet property\n sheetId = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"sheetId\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetIndex = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"index\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetRowCount = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"gridProperties\"][\"rowCount\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetColCount = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"gridProperties\"][\"columnCount\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheet_property = {\n \"sheetId\": sheetId,\n \"sheetIndex\": sheetIndex,\n \"sheetRowCount\": sheetRowCount,\n \"sheetColCount\": sheetColCount,\n }\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=\"\"):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(\n file_id=spreadsheet_id, fields=\"name\"\n )[\"name\"]\n mimeType = {\n \"Excel\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n \"Open Office sheet\": \"application/x-vnd.oasis.opendocument.spreadsheet\",\n \"PDF\": \"application/pdf\",\n \"CSV\": \"text/csv\",\n }\n\n if save_as == \"\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"Excel\"]\n )\n elif save_as == \"Excel\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"Excel\"],\n save_as=\"{0}.xlsx\".format(spreadsheet_name),\n )\n elif save_as == \"CSV\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"CSV\"],\n save_as=\"{0}.csv\".format(spreadsheet_name),\n )\n elif save_as == \"PDF\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"PDF\"],\n save_as=\"{0}.pdf\".format(spreadsheet_name),\n )\n elif save_as[-5:] == \".xlsx\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"Excel\"], save_as=save_as\n )\n elif save_as[-4:] == \".csv\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"CSV\"], save_as=save_as\n )\n elif save_as[-4:] == \".pdf\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"PDF\"], save_as=save_as\n )\n else:\n raise Exception(\n textwrap.dedent(\n \"\"\"\\\n {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method.\\\n \"\"\".format(\n save_as\n )\n )\n )\n return result\n\n def get_values(\n self,\n spreadsheet_id: str,\n range_,\n value_render_option=None,\n date_time_render_option=None,\n ):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n\n # How values should be represented in the output.\n # The default render option is ValueRenderOption.FORMATTED_VALUE.\n value_render_option = value_render_option\n\n # How dates, times, and durations should be represented in 
the output.\n # This is ignored if value_render_option is\n # FORMATTED_VALUE.\n # The default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].\n date_time_render_option = date_time_render_option\n\n request = (\n service.spreadsheets()\n .values()\n .batchGet(\n spreadsheetId=spreadsheet_id,\n ranges=range_,\n valueRenderOption=value_render_option,\n dateTimeRenderOption=date_time_render_option,\n )\n )\n result = request.execute()\n\n return result\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n\n batch_clear_values_request_body = {\n # The ranges to clear, in A1 notation.\n \"ranges\": range_\n # TODO: Add desired entries to the request body.\n }\n\n request = (\n service.spreadsheets()\n .values()\n .batchClear(\n spreadsheetId=spreadsheet_id, body=batch_clear_values_request_body\n )\n )\n result = request.execute()\n\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option=\"RAW\"):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n batch_update_values_request_body = {\n # How the input data should be interpreted.\n \"value_input_option\": value_input_option, # 'USER_ENTERED'\n # The new values to apply to the spreadsheet.\n \"data\": data,\n }\n\n request = (\n service.spreadsheets()\n .values()\n .batchUpdate(\n spreadsheetId=spreadsheet_id, body=batch_update_values_request_body\n )\n )\n result = request.execute()\n\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-ids": [
4,
8,
10,
12,
13
]
}
|
[
4,
8,
10,
12,
13
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-03 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Proceso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Nombre')),
('description', models.CharField(max_length=256, verbose_name='Descripci\xf3n')),
('deleted', models.BooleanField(default=False)),
],
),
]
|
normal
|
{
"blob_id": "f15ce7cec032ace65604771fa56e3d9969c98209",
"index": 1964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Proceso', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128, verbose_name='Nombre')), ('description', models.CharField(\n max_length=256, verbose_name='Descripción')), ('deleted', models.\n BooleanField(default=False))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Proceso', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128, verbose_name='Nombre')), ('description', models.CharField(\n max_length=256, verbose_name='Descripción')), ('deleted', models.\n BooleanField(default=False))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-08-03 10:25\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Proceso',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, verbose_name='Nombre')),\n ('description', models.CharField(max_length=256, verbose_name='Descripci\\xf3n')),\n ('deleted', models.BooleanField(default=False)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
# THRESHOLDING FUNCTION IMPLEMENTATION
def thresholding(img):
# visualizing image in HSV parameters
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# the values for lowerWhite and upperWhite are found by tweaking the HSV min/max params in the
# trackbar by running ColorPickerScript.py
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
# passing the values of lowerWhite and upperWhite to create the mask
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
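
# Illustrative usage sketch (editorial addition, kept commented out so the
# module stays import-safe; "lane_test.jpg" is a hypothetical file name):
#   frame = cv2.imread("lane_test.jpg")
#   mask = thresholding(frame)
#   lane_only = cv2.bitwise_and(frame, frame, mask=mask)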
# WARPING FUNCTION IMPLEMENTATION
def warpImg (img, points, w, h, inv=False):
pts1 = np.float32(points)
# defining the border coordinates of the warped image
pts2 = np.float32([[0,0], [w,0], [0,h], [w,h]])
# finding the transformation matrix
if inv:
#if inverted interchange pts2 and pts1
matrix = cv2.getPerspectiveTransform(pts2,pts1)
else:
matrix = cv2.getPerspectiveTransform(pts1,pts2)
imgWarp = cv2.warpPerspective(img, matrix, (w,h))
return imgWarp
# trackbar change will call nothing()
def nothing(a):
pass
# Creating the trackbars to find the optimal warping points.
# Care should be taken to choose points which are not very far from our current position
# i.e., mostly lying in the bottom half of the image, since we can only confidently
# predict the lane warp present on the road directly ahead of us.
# (An illustrative tuning loop is included after drawPoints() below.)
# create trackbars
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
# wT and hT are the target window dimensions ie. window with video
# create trackbar window
cv2.namedWindow("Trackbars")
cv2.resizeWindow("Trackbars", 360, 240)
cv2.createTrackbar("Width Top", "Trackbars", initialTrackbarVals[0], wT//2, nothing)
cv2.createTrackbar("Height Top", "Trackbars", initialTrackbarVals[1], hT, nothing)
cv2.createTrackbar("Width Bottom", "Trackbars", initialTrackbarVals[2], wT//2, nothing)
cv2.createTrackbar("Height Bottom", "Trackbars", initialTrackbarVals[3], hT, nothing)
# find the value of trackbars (real-time)
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos("Width Top", "Trackbars")
heightTop = cv2.getTrackbarPos("Height Top", "Trackbars")
widthBottom = cv2.getTrackbarPos("Width Bottom", "Trackbars")
heightBottom = cv2.getTrackbarPos("Height Bottom", "Trackbars")
# return the bounding coordinates
points = np.float32([(widthTop, heightTop), (wT-widthTop, heightTop), (widthBottom, heightBottom), (wT-widthBottom, heightBottom)])
return points
# draw the warp points as red circles
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0,0,255), cv2.FILLED)
return img
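
# Illustrative tuning loop (a sketch under assumed initial values, not part of
# the original script; kept commented out so the module stays import-safe):
#   img = cv2.imread("lane_test.jpg")  # hypothetical frame
#   initializeTrackbars([100, 80, 20, 214])
#   while True:
#       points = valTrackbars()
#       imgWarp = warpImg(img, points, 480, 240)
#       cv2.imshow("Warp Points", drawPoints(img.copy(), points))
#       cv2.imshow("Warp", imgWarp)
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break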
# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)
def getHistogram(img, minPer=0.1, display= False, region=1):
# simply sum all the pixels in the y direction
if region == 1:
# find histvalues for the complete region
histValues = np.sum(img, axis=0)
else:
# find histvalues for ONLY the bottom (1/n)th region where n is region value
histValues = np.sum(img[img.shape[0]//region:,:], axis=0)
#print(histValues)
# Some of the pixels in our image might just be noise. So we don’t want to use them in our
# calculation. Therefore we will set a threshold value which will be the minimum value required
# for any column to qualify as part of the path and not noise. We can set a hard-coded value but
# it is better to get it based on the live data. So we will find the maximum sum value and
# multiply our user defined percentage to it to create our threshold value.
maxValue = np.max(histValues)
minValue = minPer*maxValue
# To get the value of the curvature we will find the indices of all the columns that have value
# more than our threshold and then we will average our indices.
indexArray = np.where(histValues >= minValue)
basePoint = int(np.average(indexArray))
#print(basePoint)
if display:
imgHist = np.zeros((img.shape[0],img.shape[1],3),np.uint8)
for x,intensity in enumerate(histValues):
cv2.line(imgHist,(x,img.shape[0]),(x,img.shape[0]-intensity//255//region),(255,0,255),1)
cv2.circle(imgHist,(basePoint,img.shape[0]),20,(0,255,255),cv2.FILLED)
return basePoint,imgHist
return basePoint
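
# Illustrative caller-side use of getHistogram (an assumption about the
# surrounding pipeline, not taken verbatim from it): comparing the base point
# of the bottom strip against the full-width average gives a raw curve value.
#   middlePoint = getHistogram(imgWarp, minPer=0.5, region=4)
#   curveAveragePoint = getHistogram(imgWarp, minPer=0.9)
#   curveRaw = curveAveragePoint - middlePoint  # > 0 leans right, < 0 leans left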
# stack all the display windows
# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM)
def stackImages(scale,imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range (0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor= np.hstack(imgArray)
ver = hor
return ver
|
normal
|
{
"blob_id": "44175d2559f9c7d6171b6e45d24719d50dc80fb7",
"index": 7221,
"step-1": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\n<mask token>\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-3": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if 
len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-4": "import cv2\nimport numpy as np\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, 
scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-5": "import cv2\nimport numpy as np\n\n# THRESHOLDING FUNCTION IMPLEMENTATION\ndef thresholding(img):\n # visualizing image in HSV parameters\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # the values for lowerWhite and upperWhite are found by tweaking the HSV min/max params in the \n # trackbar by running ColorPickerScript.py\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n # passing the values of lowerWhite and upperWhite to create the mask\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n# WARPING FUNCTION IMPLEMENTATION\ndef warpImg (img, points, w, h, inv=False):\n pts1 = np.float32(points)\n # defining the border coordinates of the warped image\n pts2 = np.float32([[0,0], [w,0], [0,h], [w,h]])\n # finding the transformation matrix\n if inv:\n #if inverted interchange pts2 and pts1\n matrix = cv2.getPerspectiveTransform(pts2,pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1,pts2)\n \n imgWarp = cv2.warpPerspective(img, matrix, (w,h))\n return imgWarp\n\n# trackbar change will call nothing()\ndef nothing(a): \n pass\n\n# Creating the trackbars to find the optimal warping points.\n# Care should be taken to choose points which are not very far from our current position\n# ie. mostly lying in the bottom half region of the image since we should only confidently\n# predict the lane warp present on the road at this point of time.\n\n# create trackbars \ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240): \n # wT and hT are the target window dimensions ie. window with video\n # create trackbar window\n cv2.namedWindow(\"Trackbars\")\n cv2.resizeWindow(\"Trackbars\", 360, 240)\n cv2.createTrackbar(\"Width Top\", \"Trackbars\", initialTrackbarVals[0], wT//2, nothing)\n cv2.createTrackbar(\"Height Top\", \"Trackbars\", initialTrackbarVals[1], hT, nothing)\n cv2.createTrackbar(\"Width Bottom\", \"Trackbars\", initialTrackbarVals[2], wT//2, nothing)\n cv2.createTrackbar(\"Height Bottom\", \"Trackbars\", initialTrackbarVals[3], hT, nothing)\n\n# find the value of trackbars (real-time)\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos(\"Width Top\", \"Trackbars\")\n heightTop = cv2.getTrackbarPos(\"Height Top\", \"Trackbars\")\n widthBottom = cv2.getTrackbarPos(\"Width Bottom\", \"Trackbars\")\n heightBottom = cv2.getTrackbarPos(\"Height Bottom\", \"Trackbars\")\n # return the bounding coordinates\n points = np.float32([(widthTop, heightTop), (wT-widthTop, heightTop), (widthBottom, heightBottom), (wT-widthBottom, heightBottom)])\n return points\n\n# draw the warp points as red circles\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0,0,255), cv2.FILLED)\n return img\n\n# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)\ndef getHistogram(img, minPer=0.1, display= False, region=1): \n # simply sum all the pixels in the y direction\n if region == 1:\n # find histvalues for the complete region\n histValues = np.sum(img, axis=0)\n else:\n # find histvalues for ONLY the bottom (1/n)th region where n is region value\n histValues = np.sum(img[img.shape[0]//region:,:], axis=0)\n \n #print(histValues)\n \n # Some of the pixels in our image might just be noise. So we don’t want to use them in our \n # calculation. Therefore we will set a threshold value which will be the minimum value required\n # for any column to qualify as part of the path and not noise. 
We can set a hard-coded value but\n # it is better to get it based on the live data. So we will find the maximum sum value and \n # multiply our user defined percentage to it to create our threshold value.\n maxValue = np.max(histValues)\n minValue = minPer*maxValue\n \n # To get the value of the curvature we will find the indices of all the columns that have value \n # more than our threshold and then we will average our indices.\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n #print(basePoint)\n \n if display:\n imgHist = np.zeros((img.shape[0],img.shape[1],3),np.uint8)\n for x,intensity in enumerate(histValues):\n cv2.line(imgHist,(x,img.shape[0]),(x,img.shape[0]-intensity//255//region),(255,0,255),1)\n cv2.circle(imgHist,(basePoint,img.shape[0]),20,(0,255,255),cv2.FILLED)\n return basePoint,imgHist\n \n return basePoint\n \n# stack all the display windows\n# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM) \ndef stackImages(scale,imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range (0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank]*rows\n hor_con = [imageBlank]*rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n ver = hor\n return ver",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
import os, pygame
import sys
from os import path
from random import choice
WIDTH = 1000
HEIGHT = 800
FPS = 60
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GRAY80 = (204, 204, 204)
GRAY = (26, 26, 26)
screen = pygame.display.set_mode((1000, 800))
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surf.blit(text_surface, text_rect)
def button(msg,x,y,w,h,ic,ac,action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
def main():
# Initialise screen
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
# Fill background
background = pygame.image.load(os.path.join(img_folder, "STARS1.jpg")).convert_alpha()
clock = pygame.time.Clock()
start_ticks=pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
                return  # exits main(); pygame.quit() then runs at module level
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY,(400,650,190,60))
draw_text(screen, "Credits", 60, 500, 100)
draw_text(screen, "Vincent", 30, 500, 250)
draw_text(screen, "Chevery", 30, 500, 330)
draw_text(screen, "Charlie", 30, 500, 410)
draw_text(screen, "Julian", 30, 500, 490)
draw_text(screen, "Sheriyar", 30, 500, 570)
draw_text(screen, "Julian", 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400+190 > mouse[0] > 400 and 650+60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80,(400,650,190,60))
else:
pygame.draw.rect(screen, GRAY,(400,650,190,60))
draw_text(screen, "EXIT", 40, 488, 660)
#screen.blit(arrow, imagerect)
button("EXIT",400,650,190,60,GRAY,GRAY80,quit)
pygame.display.flip()
if __name__ == '__main__':
main()
pygame.quit()
|
normal
|
{
"blob_id": "7301a521586049ebb5e8e49b604cc96e3acc1fe9",
"index": 3512,
"step-1": "<mask token>\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-3": "<mask token>\nWIDTH = 1000\nHEIGHT = 800\nFPS = 60\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nGRAY80 = 204, 204, 204\nGRAY = 26, 26, 26\nscreen = pygame.display.set_mode((1000, 800))\ngame_folder = os.path.dirname(__file__)\nimg_folder = os.path.join(game_folder, 'img')\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-4": "import os, pygame\nimport sys\nfrom os import path\nfrom random import choice\nWIDTH = 1000\nHEIGHT = 800\nFPS = 60\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nGRAY80 = 204, 204, 204\nGRAY = 26, 26, 26\nscreen = pygame.display.set_mode((1000, 800))\ngame_folder = os.path.dirname(__file__)\nimg_folder = os.path.join(game_folder, 'img')\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-5": "import os, pygame\r\nimport sys\r\nfrom os import path\r\nfrom random import choice\r\n\r\nWIDTH = 1000\r\nHEIGHT = 800\r\nFPS = 60\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nGRAY80 = (204, 204, 204)\r\nGRAY = (26, 26, 26)\r\n\r\n\r\nscreen = pygame.display.set_mode((1000, 800))\r\ngame_folder = os.path.dirname(__file__)\r\nimg_folder = os.path.join(game_folder, \"img\")\r\n\r\ndef draw_text(surf, text, size, x, y):\r\n font_name = pygame.font.match_font('OCR A Extended')\r\n font = pygame.font.Font(font_name, size)\r\n text_surface = font.render(text, True, WHITE)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x, y)\r\n surf.blit(text_surface, text_rect)\r\n\r\ndef button(msg,x,y,w,h,ic,ac,action=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n print(click)\r\n\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\r\n pygame.draw.rect(screen, ac,(x,y,w,h))\r\n if click[0] == 1 and action != None:\r\n if action == quit:\r\n pygame.quit()\r\n quit()\r\n else:\r\n pygame.draw.rect(screen, ic,(x,y,w,h))\r\n\r\ndef main():\r\n # Initialise screen\r\n pygame.init()\r\n pygame.mixer.init()\r\n screen = pygame.display.set_mode((1000, 800))\r\n pygame.display.set_caption('Credits')\r\n\r\n # Fill background\r\n background = pygame.image.load(os.path.join(img_folder, \"STARS1.jpg\")).convert_alpha()\r\n clock = pygame.time.Clock()\r\n start_ticks=pygame.time.get_ticks()\r\n screen.blit(background, (0, 0))\r\n pygame.display.flip()\r\n running = True\r\n\r\n while running:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n return\r\n quit()\r\n\r\n screen.blit(background, (0, 0))\r\n pygame.draw.rect(screen, GRAY,(400,650,190,60))\r\n draw_text(screen, \"Credits\", 60, 500, 100)\r\n draw_text(screen, \"Vincent\", 30, 500, 250)\r\n draw_text(screen, \"Chevery\", 30, 500, 330)\r\n draw_text(screen, \"Charlie\", 30, 500, 410)\r\n draw_text(screen, \"Julian\", 30, 500, 490)\r\n draw_text(screen, \"Sheriyar\", 30, 500, 570)\r\n draw_text(screen, \"Julian\", 30, 500, 650)\r\n\r\n mouse = pygame.mouse.get_pos()\r\n\r\n if 400+190 > mouse[0] > 400 and 650+60 > mouse[1] > 650:\r\n pygame.draw.rect(screen, GRAY80,(400,650,190,60))\r\n else:\r\n pygame.draw.rect(screen, GRAY,(400,650,190,60))\r\n \r\n draw_text(screen, \"EXIT\", 40, 488, 660)\r\n #screen.blit(arrow, imagerect)\r\n button(\"EXIT\",400,650,190,60,GRAY,GRAY80,quit)\r\n \r\n pygame.display.flip()\r\n\r\nif __name__ == '__main__': \r\n main()\r\n pygame.quit()\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
from celery import shared_task
from djcelery.models import PeriodicTask, CrontabSchedule
import datetime
from django.db.models import Max, Count
from services import *
# Test task: multiply two numbers and print the result
@shared_task()
def excute_sql(x,y):
print "%d * %d = %d" % (x, y, x * y)
return x * y
# Monitoring task: run the SQL query and raise an alert on abnormal results
@shared_task()
def monitor_sql(*args, **kwargs):
print kwargs["name"]
print kwargs["sql"]
task_name = kwargs["name"]
datasource = kwargs["datasource"]
sql = kwargs["sql"]
operator = kwargs["operator"]
threshold = kwargs["threshold"]
tasks = PeriodicTask.objects.filter(name=task_name)
if tasks.exists():
task = tasks[0]
data = get_sql_data(datasource, sql)
        # -1: no rows returned, so we cannot tell whether the result is abnormal
sql_result = -1
monitor_result = -1
if len(data) > 0:
sql_result = data[0][0]
monitor_result = 0
            # result reached the configured threshold
if operator == ">=":
if sql_result >= threshold:
                    monitor_result = 1  # abnormal
            # result fell below the configured threshold
elif operator == "<":
if sql_result < threshold:
                    monitor_result = 1  # abnormal
            # query result unchanged since the last run
elif operator == "==":
task_results = TaskResult.objects.filter(task_id=task.id)
if task_results.exists():
task_result_before = task_results.latest('last_run_time')
sql_data_before = task_result_before.sql_data
if sql_result == sql_data_before:
                        monitor_result = 1  # abnormal
        # persist the collected data point
task_result = TaskResult(task_id=task.id, task_name=task.name, last_run_time=datetime.datetime.now(),
operator=operator, threshold=threshold, sql_data=sql_result,
monitor_result=monitor_result)
task_result.save()
return sql_result
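
# Illustrative registration sketch (editorial addition; the name, schedule and
# SQL below are assumptions, not taken from this project). monitor_sql reads
# its parameters from the PeriodicTask kwargs, so a matching record could be
# created like this:
#   import json
#   crontab, _ = CrontabSchedule.objects.get_or_create(minute='*/5')
#   PeriodicTask.objects.create(
#       name='orders_count_monitor',
#       task='tasks.monitor_sql',  # hypothetical dotted path to this task
#       crontab=crontab,
#       kwargs=json.dumps({'name': 'orders_count_monitor',
#                          'datasource': 'default',
#                          'sql': 'SELECT COUNT(*) FROM orders',
#                          'operator': '>=',
#                          'threshold': 1000}),
#   )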
|
normal
|
{
"blob_id": "6f259210cbe8969046cba1031ab42d77e913abea",
"index": 6265,
"step-1": "# -*- coding: utf-8 -*-\nfrom celery import shared_task\nfrom djcelery.models import PeriodicTask, CrontabSchedule\nimport datetime\nfrom django.db.models import Max, Count\n\nfrom services import *\n\n\n# 测试任务\n@shared_task()\ndef excute_sql(x,y):\n print \"%d * %d = %d\" % (x, y, x * y)\n return x * y\n\n\n# 监控任务:查询数据库并进行告警\n@shared_task()\ndef monitor_sql(*args, **kwargs):\n print kwargs[\"name\"]\n print kwargs[\"sql\"]\n\n task_name = kwargs[\"name\"]\n datasource = kwargs[\"datasource\"]\n sql = kwargs[\"sql\"]\n operator = kwargs[\"operator\"]\n threshold = kwargs[\"threshold\"]\n tasks = PeriodicTask.objects.filter(name=task_name)\n if tasks.exists():\n task = tasks[0]\n data = get_sql_data(datasource, sql)\n # -1:表示没查到数据,无法判断是否异常\n sql_result = -1\n monitor_result = -1\n if len(data) > 0:\n sql_result = data[0][0]\n\n monitor_result = 0\n # 达到设定阈值\n if operator == \">=\":\n if sql_result >= threshold:\n monitor_result = 1 # 异常\n\n # 小于设定阈值\n elif operator == \"<\":\n if sql_result < threshold:\n monitor_result = 1 # 异常\n\n # 查询记录不变\n elif operator == \"==\":\n task_results = TaskResult.objects.filter(task_id=task.id)\n if task_results.exists():\n task_result_before = task_results.latest('last_run_time')\n sql_data_before = task_result_before.sql_data\n if sql_result == sql_data_before:\n monitor_result = 1 # 异常\n\n # 保存采集数据\n task_result = TaskResult(task_id=task.id, task_name=task.name, last_run_time=datetime.datetime.now(),\n operator=operator, threshold=threshold, sql_data=sql_result,\n monitor_result=monitor_result)\n task_result.save()\n\n return sql_result\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
import json
import dateutil.parser
from tests.controllers.controller_test_base import ControllerTestBase
from tests.test_helper import TestHelper
from tests.http_response_catcher import HttpResponseCatcher
from plastiqpublicapi.api_helper import APIHelper
from plastiqpublicapi.controllers.categories_controller import CategoriesController
class CategoriesControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(CategoriesControllerTests, cls).setUpClass()
cls.response_catcher = HttpResponseCatcher()
cls.controller = CategoriesController(cls.config, cls.response_catcher)
# Retrieve a paginated list of Categories by query parameter(s)
def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self):
# Perform the API call through the SDK function
result = self.controller.retrieve_a_paginated_list_of_categories_by_query_parameter_s()
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 200)
# Test headers
expected_headers = {}
expected_headers['trace-id'] = None
expected_headers['content-type'] = 'application/json'
self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))
|
normal
|
{
"blob_id": "a4f2418e746cc43bd407b6a212de9802044351e1",
"index": 3928,
"step-1": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self\n ):\n result = (self.controller.\n retrieve_a_paginated_list_of_categories_by_query_parameter_s())\n self.assertEquals(self.response_catcher.response.status_code, 200)\n expected_headers = {}\n expected_headers['trace-id'] = None\n expected_headers['content-type'] = 'application/json'\n self.assertTrue(TestHelper.match_headers(expected_headers, self.\n response_catcher.response.headers))\n",
"step-4": "<mask token>\nimport json\nimport dateutil.parser\nfrom tests.controllers.controller_test_base import ControllerTestBase\nfrom tests.test_helper import TestHelper\nfrom tests.http_response_catcher import HttpResponseCatcher\nfrom plastiqpublicapi.api_helper import APIHelper\nfrom plastiqpublicapi.controllers.categories_controller import CategoriesController\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self\n ):\n result = (self.controller.\n retrieve_a_paginated_list_of_categories_by_query_parameter_s())\n self.assertEquals(self.response_catcher.response.status_code, 200)\n expected_headers = {}\n expected_headers['trace-id'] = None\n expected_headers['content-type'] = 'application/json'\n self.assertTrue(TestHelper.match_headers(expected_headers, self.\n response_catcher.response.headers))\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nplastiqpublicapi\r\n\r\nThis file was automatically generated by APIMATIC v3.0 (\r\n https://www.apimatic.io ).\r\n\"\"\"\r\n\r\nimport json\r\nimport dateutil.parser\r\n\r\nfrom tests.controllers.controller_test_base import ControllerTestBase\r\nfrom tests.test_helper import TestHelper\r\nfrom tests.http_response_catcher import HttpResponseCatcher\r\nfrom plastiqpublicapi.api_helper import APIHelper\r\nfrom plastiqpublicapi.controllers.categories_controller import CategoriesController\r\n\r\n\r\nclass CategoriesControllerTests(ControllerTestBase):\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n super(CategoriesControllerTests, cls).setUpClass()\r\n cls.response_catcher = HttpResponseCatcher()\r\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\r\n\r\n # Retrieve a paginated list of Categories by query parameter(s)\r\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self):\r\n\r\n # Perform the API call through the SDK function\r\n result = self.controller.retrieve_a_paginated_list_of_categories_by_query_parameter_s()\r\n\r\n # Test response code\r\n self.assertEquals(self.response_catcher.response.status_code, 200)\r\n\r\n # Test headers\r\n expected_headers = {}\r\n expected_headers['trace-id'] = None\r\n expected_headers['content-type'] = 'application/json'\r\n\r\n self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
a = int(input("Enter no. of over: "))
print("total ball:",a*6 )
import random
comp_runs = random.randint(0,36)
print("computer's run:" ,comp_runs)
comp_runs = comp_runs+1
print("runs need to win:",comp_runs)
chances_1 = a*6
no_of_chances_1 = 0
your_runs = 0
print("-----------------------------------------------\nYour Batting\n")
while no_of_chances_1 < chances_1:
runs = int(input("Enter Runs for Your Batting Turn: "))
comp_bowl = random.randint(1,6)
if runs == comp_bowl:
print("Computer Guess: ", comp_bowl)
print("You are Out. Your Total Runs= ", your_runs, "\n")
break
elif runs > 10:
print("ALERT!! Support No only till 10\n")
continue
else:
your_runs = your_runs + runs
print("Computer Guess: ", comp_bowl)
print("Your runs Now are: ", your_runs, "\n")
if comp_runs < your_runs:
break
no_of_chances_1 = no_of_chances_1 + 1
# after the innings ends, show the result
print("\n-----------------------------------------------\nRESULTS: ")
if comp_runs < your_runs:
print("You won the Game.")
elif comp_runs == your_runs:
print("The Game is a Tie")
else:
print("Computer won the Game.")
|
normal
|
{
"blob_id": "00312f57e8a78444937f46cecb62a2b684b4fc91",
"index": 8779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('total ball:', a * 6)\n<mask token>\nprint(\"computer's run:\", comp_runs)\n<mask token>\nprint('runs need to win:', comp_runs)\n<mask token>\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-3": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\n<mask token>\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-4": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\nimport random\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-5": "a = int(input(\"Enter no. of over: \"))\r\nprint(\"total ball:\",a*6 )\r\nimport random\r\n\r\ncomp_runs = random.randint(0,36)\r\nprint(\"computer's run:\" ,comp_runs)\r\ncomp_runs = comp_runs+1\r\nprint(\"runs need to win:\",comp_runs)\r\nchances_1 = a*6\r\nno_of_chances_1 = 0\r\nyour_runs = 0\r\n\r\nprint(\"-----------------------------------------------\\nYour Batting\\n\")\r\nwhile no_of_chances_1 < chances_1:\r\n\r\n runs = int(input(\"Enter Runs for Your Batting Turn: \"))\r\n comp_bowl = random.randint(1,6)\r\n\r\n if runs == comp_bowl:\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"You are Out. Your Total Runs= \", your_runs, \"\\n\")\r\n break\r\n elif runs > 10:\r\n print(\"ALERT!! Support No only till 10\\n\")\r\n continue\r\n else:\r\n your_runs = your_runs + runs\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"Your runs Now are: \", your_runs, \"\\n\")\r\n if comp_runs < your_runs:\r\n break\r\n\r\n no_of_chances_1 = no_of_chances_1 + 1\r\n\r\n#after the over ends now result time\r\n\r\nprint(\"\\n-----------------------------------------------\\nRESULTS: \")\r\n\r\nif comp_runs < your_runs:\r\n print(\"You won the Game.\")\r\n\r\nelif comp_runs == your_runs:\r\n print(\"The Game is a Tie\")\r\n\r\nelse:\r\n print(\"Computer won the Game.\")\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tensorflow as tf
import keras
import numpy as np
def house_model(y_new):
xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float) # Your Code Here#
ys = np.array([0.50, 0.100, 1.50, 2.50, 3.50, 4.50, 5.50], dtype=float) # Your Code Here#
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) # Your Code Here#
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs,ys, epochs=100)
return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
|
normal
|
{
"blob_id": "0b3f16ee9b287c6c77acde674abec9deb4053c83",
"index": 946,
"step-1": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<mask token>\nprint(prediction)\n",
"step-3": "<mask token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n",
"step-4": "import tensorflow as tf\nimport keras\nimport numpy as np\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n",
"step-5": "import tensorflow as tf\nimport keras\nimport numpy as np\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float) # Your Code Here#\n ys = np.array([0.50, 0.100, 1.50, 2.50, 3.50, 4.50, 5.50], dtype=float) # Your Code Here#\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) # Your Code Here#\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs,ys, epochs=100)\n return model.predict(y_new)[0]\n\nprediction = house_model([7.0])\nprint(prediction)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import Flask, abort, url_for, render_template, request, redirect
app = Flask(__name__)
@app.route('/')
def index():
return render_template('login.html')
# #redirect demo
# @app.route('/login', methods=['POST', 'GET'])
# def login():
# if request.method == 'POST' and request.form['username'] == 'admin':
# return redirect(url_for('success'))
# return redirect(url_for('index'), 302)
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
if request.form['username'] == 'admin':
return redirect(url_for('success'))
else:
abort(401)
else:
return redirect(url_for('index'))
@app.route('/success')
def success():
    return 'logged in successfully'
# from flask import Flask, render_template, request
# from werkzeug.utils import secure_filename
# import os
# app = Flask(__name__)
# app.config['UPLOAD_FOLDER'] = os.getcwd()+'/media/'
# app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024  # cap upload size at 16 MB
# @app.route('/')
# def upload():
# return render_template('upload.html')
# @app.route('/uploader', methods = ['GET', 'POST'])
# def upload_file():
# if request.method == 'POST':
# f = request.files['file']
# f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))
# return 'file uploaded successfully'
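
# Hypothetical entry point so the demo runs directly with `python app.py`.
if __name__ == '__main__':
    app.run(debug=True)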
|
normal
|
{
"blob_id": "82f8bfd95fea3025bed2b4583c20526b0bd5484f",
"index": 3535,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('login.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('login.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] == 'admin':\n return redirect(url_for('success'))\n else:\n abort(401)\n else:\n return redirect(url_for('index'))\n\n\[email protected]('/success')\ndef success():\n return 'logged in successfully'\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('login.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] == 'admin':\n return redirect(url_for('success'))\n else:\n abort(401)\n else:\n return redirect(url_for('index'))\n\n\[email protected]('/success')\ndef success():\n return 'logged in successfully'\n",
"step-4": "from flask import Flask, abort, url_for, render_template, request\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('login.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] == 'admin':\n return redirect(url_for('success'))\n else:\n abort(401)\n else:\n return redirect(url_for('index'))\n\n\[email protected]('/success')\ndef success():\n return 'logged in successfully'\n",
"step-5": "from flask import Flask, abort, url_for, render_template, request\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n return render_template('login.html')\n\n# #redirect demo\n# @app.route('/login', methods=['POST', 'GET'])\n# def login():\n# if request.method == 'POST' and request.form['username'] == 'admin':\n# return redirect(url_for('success'))\n# return redirect(url_for('index'), 302)\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] == 'admin':\n return redirect(url_for('success'))\n else:\n abort(401)\n else:\n return redirect(url_for('index'))\n\[email protected]('/success')\ndef success():\n return'logged in successfully'\n\n\n\n\n\n# from flask import Flask, render_template, request\n# from werkzeug.utils import secure_filename\n# import os\n# app = Flask(__name__)\n\n# app.config['UPLOAD_FOLDER'] = os.getcwd()+'/media/'\n# app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024 #限制大小16MB\n\n# @app.route('/')\n# def upload():\n# return render_template('upload.html')\n\n# @app.route('/uploader', methods = ['GET', 'POST'])\n# def upload_file():\n# if request.method == 'POST':\n# f = request.files['file']\n# f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))\n# return 'flie uploaded successfully'",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |